This file serves as a supplementary document that describes all the statistical results obtained for this project. It may help to answer some new questions that are not covered in the corresponding slides.
This file displays the results of the FaceWord project (data collected at NYU). There are two experiments in this project. In Experiment 1, Chinese participants viewed Chinese faces and characters in four conditions (Layout: intact, exchange [top and bottom parts were switched], top and bottom) and completed an additional localizer (Chinese faces, Chinese characters, objects, scrambled objects). In Experiment 2, English speakers viewed Chinese characters and English words in four conditions (Layout: intact, exchange, top [top parts of Chinese characters; left two letters for English words] and bottom [bottom parts of Chinese characters; right four letters for English words]) and completed an additional localizer (Caucasian faces, English words, objects, scrambled objects).
For the main runs, analysis is conducted for each ROI separately (FFA1, FFA2, VWFA, LOC).
For each ROI, three analyses are performed:
libsvm is used to decode different condition pairs (see below), and one-tailed one-sample t-tests are used to test whether each pair of conditions can be decoded [i.e., whether the accuracy is significantly larger than the chance level (0.5)]. Leave-one(-run)-out cross-validation is applied. No normalization or demeaning was used.
The probability was estimated for each participant separately:
libsvm is trained with the patterns of intact vs. exchange (10 runs).
# set the order of levels in factors
# Factor-level orders used throughout the plots and models: localizer
# conditions, stimulus category, experiment language, layout manipulation,
# and region of interest.
loc_order <- c("face", "object", "word", "scrambled")
faceword_order <- c("faces", "words")
words_order <- c("English", "Chinese")
layout_order <- c("intact", "exchange", "top", "bottom")
roi_order <- c("FFA1", "FFA2", "VWFA", "LO")
# FreeSurfer label file names for each ROI (left/right hemisphere;
# VWFA is left-hemisphere only).
label_FFA1 <- c("roi.lh.f-vs-o.ffa1.label", "roi.rh.f-vs-o.ffa1.label")
label_FFA2 <- c("roi.lh.f-vs-o.ffa2.label", "roi.rh.f-vs-o.ffa2.label")
label_VWFA <- "roi.lh.word-vs-face-object-scrambled.label"
label_LO <- c("roi.lh.o-vs-scr.label", "roi.rh.o-vs-scr.label")
# criterion of vertex number
nVtx_size_min <- 30 # mm^2 -- despite the name, this is compared against label Size (area) below
# set up the theme for plots and raincloud plots
# load all the R files in "Utilities/" (defines plot_uni, plot_decode, plot_simi, ...)
# NOTE(review): the second argument of list.files() is a regex, so "*.R" is
# effectively ".R" anywhere in the name; "\\.R$" would be stricter -- verify.
tmp <- sapply(list.files('Utilities', "*.R", full.names = TRUE, recursive = TRUE), source)
activationUL <- 2.75
onesample0 <- 0.3 # the starting point of the y axis (for one-sample t-tests)
nDigitals <- 3 # number of digits of p-values in plots
# order of the condition pairs decoded in Experiment 1
pair_order_E1 <- c("face_intact-word_intact",
"face_intact-face_exchange",
"face_top-face_bottom",
"word_intact-word_exchange",
"word_top-word_bottom")
# Label metadata: one row per participant x ROI label, with the label's
# surface area (Size, mm^2) and vertex count (NVtxs).
# "roi" is a short ROI name derived from the label file name and "Subject"
# is the part of SubjCode before the first underscore.
# Fix: anchor and escape the regex dots -- the original patterns "roi." and
# ".label" treated "." as "any character" instead of a literal dot.
df_label <- read_csv(file.path("data", "faceword_E1_Label_HJ.csv")) %>%
  mutate(roi = str_remove(Label, "^roi\\."),
         roi = str_remove(roi, "\\.label$")) %>%
  mutate(Subject = str_remove(SubjCode, "_.*$"))
# Preview (disabled): head(df_label)
# Wide table of label size (mm^2): one row per participant, one column per ROI.
arrange(
  pivot_wider(
    select(df_label, SubjCode, roi, Size),
    names_from = roi,
    values_from = Size
  ),
  SubjCode
)
The above table displays the size (in mm^2) of each label for each participant. (NA denotes that this label is not available for that participant.)
# Wide table of vertex counts: one row per participant, one column per ROI.
arrange(
  pivot_wider(
    select(df_label, SubjCode, roi, NVtxs),
    names_from = roi,
    values_from = NVtxs
  ),
  SubjCode
)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# Per-label summary across all participants: how many have the label,
# plus its mean area and mean vertex count.
summarize(
  group_by(df_label, Label, roi),
  Count = n(),
  meanSize = mean(Size),
  meanNVtx = mean(NVtxs)
)
# Same summary restricted to labels passing the size criterion; Count is the
# number of participants entering the subsequent analyses for each ROI.
df_nlabel <- summarize(
  group_by(filter(df_label, Size > nVtx_size_min), Label, roi),
  Count = n(),
  meanSize = mean(Size),
  meanNVtx = mean(NVtxs)
)
df_nlabel
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# Load the data file from the functional scans for the univariate analysis
# (beta responses per session x label x condition).
df_uni_E1 <- read_csv(file.path("data", "faceword_E1_Uni_HJ.csv"))
head(df_uni_E1)
# Clean the univariate data: drop missing responses, split Condition into the
# two IVs (FaceWord, Layout), tag the hemisphere from the label name, attach
# the label metadata, and keep only labels passing the size criterion.
df_clean_uni_E1 <- df_uni_E1 %>%
  filter(Response != "NaN") %>%  # drop rows without a valid response
  separate(Condition, into = c("FaceWord", "Layout"), sep = "_") %>%
  mutate(FaceWord = gsub("face", "faces", FaceWord),
         FaceWord = gsub("word", "words", FaceWord),
         Layout = factor(Layout, levels = layout_order),
         Hemisphere = case_when(grepl("lh", Label) ~ "left",
                                grepl("rh", Label) ~ "right",
                                TRUE ~ "NA")) %>%
  select(Hemisphere, Label, SessCode, FaceWord, Layout, Response) %>%
  mutate(Subject = str_replace(SessCode, "\\_.*", "")) %>%
  left_join(df_label, by = c("Label", "Subject")) %>%
  filter(Size > nVtx_size_min)
head(df_clean_uni_E1)
# Decoding accuracies (libsvm, leave-one-run-out) per label / session / pair.
df_decode_E1 <- read_csv(file.path("data", "faceword_E1_Decode_noz.csv"))
head(df_decode_E1)
# Attach hemisphere and subject information to the decoding results and
# apply the label-size criterion.
df_clean_decode_E1 <- df_decode_E1 %>%
  select(Label, SessCode, ClassifyPair, ACC) %>%
  mutate(Hemisphere = case_when(grepl("lh", Label) ~ "left",
                                grepl("rh", Label) ~ "right",
                                TRUE ~ "NA"),
         Subject = str_remove(SessCode, "\\_.*")) %>%
  left_join(df_label, by = c("Label", "Subject")) %>%
  filter(Size > nVtx_size_min)
# Mean decoding accuracy across cross-validation folds for every
# hemisphere x label x session x classification pair.
df_decode_acc_E1 <- ungroup(summarize(
  group_by(df_clean_decode_E1, Hemisphere, Label, SessCode, ClassifyPair),
  Accuracy = mean(ACC),
  Count = n()
))
df_decode_acc_E1
# Similarity results: classification of weighted top+bottom pattern
# combinations as intact vs. exchange.
df_simi <- read_csv(file.path("data", "faceword_E1_Similarity_noz.csv"))
head(df_simi)
# For every pattern, record whether it was classified as "exchange" (binary)
# and the classifier's probability for the exchange class, then apply the
# label-size criterion.
df_clean_simi_E1 <- df_simi %>%
  mutate(asExchange = as.numeric(grepl("exchange", PredictCond)), # binary prediction
         pExchange = Probability_2,                               # probability prediction
         Subject = str_remove(SessCode, "\\_.*")) %>%
  left_join(df_label, by = c("Label", "Subject")) %>%
  filter(Size > nVtx_size_min)
# Aggregate to one rate per session x label x classifier x weight combination.
# The probability (not the binary prediction) is used as the final rate.
df_rate_simi_E1 <- df_clean_simi_E1 %>%
  group_by(SessCode, Label, ClassPair_1, Combination) %>%
  summarize(binaryAsExchange = mean(asExchange),
            pAsExchange = mean(pExchange),
            RateAsExchange = pAsExchange) %>%
  ungroup() %>%
  mutate(Hemisphere = case_when(grepl("lh", Label) ~ "left",
                                grepl("rh", Label) ~ "right",
                                TRUE ~ "NA"))
head(df_rate_simi_E1)
# Restrict each data set to the (left/right) FFA1 labels.
df_uni_E1_FFA1 <- df_clean_uni_E1 %>% filter(Label %in% label_FFA1)
df_decode_E1_FFA1 <- df_decode_acc_E1 %>% filter(Label %in% label_FFA1)
df_simi_E1_FFA1 <- df_rate_simi_E1 %>% filter(Label %in% label_FFA1)
# Number of sessions contributing to each hemisphere's FFA1.
df_uni_E1_FFA1 %>%
  distinct(Hemisphere, Label, SessCode) %>%
  group_by(Hemisphere, Label) %>%
  summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA on the left-FFA1 betas
# (afex::aov_4; both factors within-subject, GG sphericity correction).
anova_E1_lFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1, Label == label_FFA1[[1]]))
anova_E1_lFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 11 0.26 1.96 .03 .19
## 2 Layout 2.01, 22.10 0.04 7.62 ** .04 .003
## 3 FaceWord:Layout 2.45, 26.95 0.02 3.34 * .01 .04
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell (left FFA1).
emm_aov_E1_lFFA1 <- emmeans(anova_E1_lFFA1, ~ FaceWord * Layout)
arrange(as.data.frame(emm_aov_E1_lFFA1), FaceWord)
Posthoc analysis for the main effects:
# Main effect of FaceWord: pairwise contrast averaged over Layout.
contrast(emmeans(emm_aov_E1_lFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.147 0.105 11 1.400 0.1891
##
## Results are averaged over the levels of: Layout
# Main effect of Layout: all pairwise contrasts (Tukey-adjusted by default).
contrast(emmeans(emm_aov_E1_lFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.0644 0.0484 33 1.331 0.5501
## intact - top 0.2115 0.0484 33 4.373 0.0006
## intact - bottom 0.1574 0.0484 33 3.255 0.0133
## exchange - top 0.1471 0.0484 33 3.042 0.0226
## exchange - bottom 0.0930 0.0484 33 1.923 0.2381
## top - bottom -0.0541 0.0484 33 -1.118 0.6809
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple effects: pairwise contrasts within each level of the other factor,
# uncorrected p-values (left FFA1).
contr_aov_E1_lFFA1 <- contrast(emm_aov_E1_lFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_lFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.21643 0.1165 16.4 1.857 0.0814
## exchange . faces - words 0.06828 0.1165 16.4 0.586 0.5659
## top . faces - words 0.25903 0.1165 16.4 2.223 0.0406
## bottom . faces - words 0.04451 0.1165 16.4 0.382 0.7074
## . faces intact - exchange 0.13845 0.0635 64.4 2.179 0.0330
## . faces intact - top 0.19017 0.0635 64.4 2.993 0.0039
## . faces intact - bottom 0.24335 0.0635 64.4 3.830 0.0003
## . faces exchange - top 0.05172 0.0635 64.4 0.814 0.4186
## . faces exchange - bottom 0.10490 0.0635 64.4 1.651 0.1036
## . faces top - bottom 0.05318 0.0635 64.4 0.837 0.4057
## . words intact - exchange -0.00969 0.0635 64.4 -0.153 0.8792
## . words intact - top 0.23277 0.0635 64.4 3.664 0.0005
## . words intact - bottom 0.07142 0.0635 64.4 1.124 0.2651
## . words exchange - top 0.24246 0.0635 64.4 3.816 0.0003
## . words exchange - bottom 0.08112 0.0635 64.4 1.277 0.2063
## . words top - bottom -0.16135 0.0635 64.4 -2.540 0.0135
2 (face vs. word) × 2 (intact vs. exchange) ANOVA
# 2 x 2 follow-up ANOVA (FaceWord x intact/exchange) for left FFA1,
# with partial eta squared and uncorrected simple effects.
anova_E1_lFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_lFFA1_ie, "pes") # report partial eta squared
emm_E1_lFFA1_ie <- emmeans(anova_E1_lFFA1_ie, ~ FaceWord + Layout)
(simple_E1_lFFA1_ie <- pairs(emm_E1_lFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.21643 0.1188 14.4 1.822 0.0893
## exchange . faces - words 0.06828 0.1188 14.4 0.575 0.5743
## . faces intact - exchange 0.13845 0.0626 22.0 2.210 0.0378
## . words intact - exchange -0.00969 0.0626 22.0 -0.155 0.8784
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 x 2 follow-up ANOVA (FaceWord x top/bottom) for left FFA1.
anova_E1_lFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_lFFA1_tb, "pes") # report partial eta squared
emm_E1_lFFA1_tb <- emmeans(anova_E1_lFFA1_tb, ~ FaceWord + Layout)
(simple_E1_lFFA1_tb <- pairs(emm_E1_lFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.2590 0.1142 13.1 2.268 0.0409
## bottom . faces - words 0.0445 0.1142 13.1 0.390 0.7030
## . faces top - bottom 0.0532 0.0531 21.3 1.001 0.3281
## . words top - bottom -0.1613 0.0531 21.3 -3.037 0.0062
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA on the right-FFA1 betas.
anova_E1_rFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1, Label == label_FFA1[[2]]))
anova_E1_rFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 16 0.38 15.77 ** .13 .001
## 2 Layout 2.47, 39.55 0.05 4.74 ** .02 .010
## 3 FaceWord:Layout 2.40, 38.44 0.05 2.63 + .008 .08
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell (right FFA1).
emm_aov_E1_rFFA1 <- emmeans(anova_E1_rFFA1, ~ FaceWord * Layout)
arrange(as.data.frame(emm_aov_E1_rFFA1), FaceWord)
Posthoc analysis for the main effects:
# Main effect of FaceWord: pairwise contrast averaged over Layout.
contrast(emmeans(emm_aov_E1_rFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.422 0.106 16 3.972 0.0011
##
## Results are averaged over the levels of: Layout
# Main effect of Layout: all pairwise contrasts (Tukey-adjusted by default).
contrast(emmeans(emm_aov_E1_rFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.15160 0.0513 48 2.956 0.0240
## intact - top 0.16925 0.0513 48 3.301 0.0095
## intact - bottom 0.14972 0.0513 48 2.920 0.0264
## exchange - top 0.01765 0.0513 48 0.344 0.9858
## exchange - bottom -0.00188 0.0513 48 -0.037 1.0000
## top - bottom -0.01953 0.0513 48 -0.381 0.9810
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple effects: pairwise contrasts within each level of the other factor,
# uncorrected p-values (right FFA1).
contr_aov_E1_rFFA1 <- contrast(emm_aov_E1_rFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_rFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.592937 0.1228 27.6 4.829 <.0001
## exchange . faces - words 0.339928 0.1228 27.6 2.768 0.0100
## top . faces - words 0.377181 0.1228 27.6 3.072 0.0047
## bottom . faces - words 0.376699 0.1228 27.6 3.068 0.0048
## . faces intact - exchange 0.278103 0.0719 96.0 3.869 0.0002
## . faces intact - top 0.277131 0.0719 96.0 3.856 0.0002
## . faces intact - bottom 0.257837 0.0719 96.0 3.587 0.0005
## . faces exchange - top -0.000972 0.0719 96.0 -0.014 0.9892
## . faces exchange - bottom -0.020266 0.0719 96.0 -0.282 0.7786
## . faces top - bottom -0.019294 0.0719 96.0 -0.268 0.7889
## . words intact - exchange 0.025094 0.0719 96.0 0.349 0.7278
## . words intact - top 0.061375 0.0719 96.0 0.854 0.3953
## . words intact - bottom 0.041599 0.0719 96.0 0.579 0.5641
## . words exchange - top 0.036282 0.0719 96.0 0.505 0.6149
## . words exchange - bottom 0.016506 0.0719 96.0 0.230 0.8189
## . words top - bottom -0.019776 0.0719 96.0 -0.275 0.7838
# 2 x 2 follow-up ANOVA (FaceWord x intact/exchange) for right FFA1.
anova_E1_rFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_rFFA1_ie, "pes") # report partial eta squared
emm_E1_rFFA1_ie <- emmeans(anova_E1_rFFA1_ie, ~ FaceWord + Layout)
(simple_E1_rFFA1_ie <- pairs(emm_E1_rFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.5929 0.1086 26.1 5.462 <.0001
## exchange . faces - words 0.3399 0.1086 26.1 3.131 0.0043
## . faces intact - exchange 0.2781 0.0709 30.4 3.924 0.0005
## . words intact - exchange 0.0251 0.0709 30.4 0.354 0.7258
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 x 2 follow-up ANOVA (FaceWord x top/bottom) for right FFA1.
anova_E1_rFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("top", "bottom")))
anova(anova_E1_rFFA1_tb, "pes") # report partial eta squared
emm_E1_rFFA1_tb <- emmeans(anova_E1_rFFA1_tb, ~ FaceWord + Layout)
(simple_E1_rFFA1_tb <- pairs(emm_E1_rFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.3772 0.1356 20.8 2.783 0.0112
## bottom . faces - words 0.3767 0.1356 20.8 2.779 0.0113
## . faces top - bottom -0.0193 0.0711 32.0 -0.272 0.7877
## . words top - bottom -0.0198 0.0711 32.0 -0.278 0.7826
# Stack the left/right EMMs, tag each row with its hemisphere, and plot the
# univariate FFA1 responses (plot_uni() is defined in Utilities/).
nRow_E1 <- nrow(as.data.frame(emm_aov_E1_lFFA1))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA1 <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_aov_E1_lFFA1), as.data.frame(emm_aov_E1_rFFA1))
)
plot_uni_E1_FFA1 <- plot_uni(desp_uni_E1_FFA1, contr_aov_E1_lFFA1, contr_aov_E1_rFFA1, "FFA1")
# ggsave('plot_uni_E1_FFA1.png', plot_uni_E1_FFA1, width = 10, height = 10)
plot_uni_E1_FFA1
The above figure shows the neural responses (beta values) in FFA1 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Hemisphere tags for the 2 x 2 intact/exchange EMMs, then plot them.
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA1_ie))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA1_ie <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_E1_lFFA1_ie), as.data.frame(emm_E1_rFFA1_ie))
)
plot_uni_E1_FFA1_ie <- plot_uni(desp_uni_E1_FFA1_ie, simple_E1_lFFA1_ie, simple_E1_rFFA1_ie, "FFA1", FALSE)
# ggsave('plot_uni_E1_FFA1_ie.png', plot_uni_E1_FFA1_ie, width = 10, height = 5)
plot_uni_E1_FFA1_ie
# Hemisphere tags for the 2 x 2 top/bottom EMMs, then plot them.
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA1_tb))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA1_tb <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_E1_lFFA1_tb), as.data.frame(emm_E1_rFFA1_tb))
)
plot_uni_E1_FFA1_tb <- plot_uni(desp_uni_E1_FFA1_tb, simple_E1_lFFA1_tb, simple_E1_rFFA1_tb, "FFA1", FALSE)
# ggsave('plot_uni_E1_FFA1_tb.png', plot_uni_E1_FFA1_tb, width = 10, height = 5)
plot_uni_E1_FFA1_tb
# One-tailed one-sample t-tests (H1: accuracy > chance, 0.5) for each
# hemisphere x classification pair in FFA1, then plot.
# Improvement: call t.test() once per group (the original repeated the same
# call nine times) and extract components by name rather than by position.
# Note: the "SD" column keeps its original content, which is t.test()$stderr
# (the standard error of the mean), not the standard deviation.
one_decode_agg_E1_FFA1 <- df_decode_E1_FFA1 %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize(tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr,
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            upper.CL = mean * 2 - lower.CL, # one-sided CI is (lower, Inf); mirror the lower bound
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_decode_agg_E1_FFA1
plot_decode_E1_FFA1 <- plot_decode(one_decode_agg_E1_FFA1, "FFA1")
# ggsave('plot_decode_E1_FFA1.png', plot_decode_E1_FFA1, width = 6.5, height = 16)
plot_decode_E1_FFA1
The above figure shows the decoding accuracy in FFA1 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Two-tailed one-sample t-tests (against 0.5) on the rate of top+bottom
# combinations being classified as "exchange", per hemisphere x weight
# combination in FFA1, then plot.
# Improvement: call t.test() once per group and extract components by name
# (the original repeated the identical call nine times with positional [[n]]).
# Note: "SD" keeps its original content, t.test()$stderr (standard error).
one_simi_E1_FFA1 <- df_simi_E1_FFA1 %>%
  group_by(Hemisphere, Combination) %>%
  summarize(tt = list(t.test(RateAsExchange, mu = 0.5)),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr,
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            upper.CL = tt[[1]]$conf.int[2],
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_simi_E1_FFA1
plot_simi_E1_FFA1 <- plot_simi(one_simi_E1_FFA1, "FFA1")
# ggsave('plot_simi_E1_FFA1.png', plot_simi_E1_FFA1, width = 8, height = 10)
plot_simi_E1_FFA1
The above figure shows the probability of top+bottom being decoded as the exchange condition in FFA1. Patterns of top and bottom were combined with different weights, e.g., “face_top0.25-face_bottom0.75” denotes the linear combination of face_top and face_bottom with weights of 0.25/0.75. The numbers are the p-values for the two-tailed one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# Restrict each data set to the (left/right) FFA2 labels.
df_uni_E1_FFA2 <- df_clean_uni_E1 %>% filter(Label %in% label_FFA2)
df_decode_E1_FFA2 <- df_decode_acc_E1 %>% filter(Label %in% label_FFA2)
df_simi_E1_FFA2 <- df_rate_simi_E1 %>% filter(Label %in% label_FFA2)
# Number of sessions contributing to each hemisphere's FFA2.
df_uni_E1_FFA2 %>%
  distinct(Hemisphere, Label, SessCode) %>%
  group_by(Hemisphere, Label) %>%
  summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA on the left-FFA2 betas.
anova_E1_lFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2, Label == label_FFA2[[1]]))
anova_E1_lFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 11 0.07 2.65 .01 .13
## 2 Layout 2.39, 26.30 0.02 6.90 ** .02 .003
## 3 FaceWord:Layout 2.31, 25.37 0.04 0.21 .001 .84
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell (left FFA2).
emm_aov_E1_lFFA2 <- emmeans(anova_E1_lFFA2, ~ FaceWord * Layout)
arrange(as.data.frame(emm_aov_E1_lFFA2), FaceWord)
Posthoc analysis for the main effects:
# Main effect of FaceWord: pairwise contrast averaged over Layout.
contrast(emmeans(emm_aov_E1_lFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.0908 0.0558 11 1.628 0.1318
##
## Results are averaged over the levels of: Layout
# Main effect of Layout: all pairwise contrasts (Tukey-adjusted by default).
contrast(emmeans(emm_aov_E1_lFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.1022 0.0404 33 2.531 0.0734
## intact - top 0.1830 0.0404 33 4.532 0.0004
## intact - bottom 0.0863 0.0404 33 2.138 0.1624
## exchange - top 0.0808 0.0404 33 2.001 0.2082
## exchange - bottom -0.0159 0.0404 33 -0.394 0.9790
## top - bottom -0.0967 0.0404 33 -2.394 0.0980
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple effects: pairwise contrasts within each level of the other factor,
# uncorrected p-values (left FFA2).
contr_aov_E1_lFFA2 <- contrast(emm_aov_E1_lFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_lFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.13378 0.0822 35.6 1.627 0.1126
## exchange . faces - words 0.07368 0.0822 35.6 0.896 0.3762
## top . faces - words 0.06034 0.0822 35.6 0.734 0.4678
## bottom . faces - words 0.09558 0.0822 35.6 1.162 0.2528
## . faces intact - exchange 0.13228 0.0637 63.5 2.075 0.0420
## . faces intact - top 0.21976 0.0637 63.5 3.448 0.0010
## . faces intact - bottom 0.10544 0.0637 63.5 1.654 0.1030
## . faces exchange - top 0.08748 0.0637 63.5 1.373 0.1747
## . faces exchange - bottom -0.02684 0.0637 63.5 -0.421 0.6751
## . faces top - bottom -0.11433 0.0637 63.5 -1.794 0.0776
## . words intact - exchange 0.07219 0.0637 63.5 1.133 0.2617
## . words intact - top 0.14633 0.0637 63.5 2.296 0.0250
## . words intact - bottom 0.06724 0.0637 63.5 1.055 0.2954
## . words exchange - top 0.07414 0.0637 63.5 1.163 0.2491
## . words exchange - bottom -0.00494 0.0637 63.5 -0.078 0.9384
## . words top - bottom -0.07909 0.0637 63.5 -1.241 0.2193
2 (face vs. word) × 2 (intact vs. exchange) ANOVA
# 2 x 2 follow-up ANOVA (FaceWord x intact/exchange) for left FFA2.
anova_E1_lFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_lFFA2_ie, "pes") # report partial eta squared
emm_E1_lFFA2_ie <- emmeans(anova_E1_lFFA2_ie, ~ FaceWord + Layout)
(simple_E1_lFFA2_ie <- pairs(emm_E1_lFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.1338 0.0834 17.5 1.604 0.1267
## exchange . faces - words 0.0737 0.0834 17.5 0.883 0.3891
## . faces intact - exchange 0.1323 0.0627 21.6 2.110 0.0466
## . words intact - exchange 0.0722 0.0627 21.6 1.152 0.2621
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 x 2 follow-up ANOVA (FaceWord x top/bottom) for left FFA2.
anova_E1_lFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_lFFA2_tb, "pes") # report partial eta squared
emm_E1_lFFA2_tb <- emmeans(anova_E1_lFFA2_tb, ~ FaceWord + Layout)
(simple_E1_lFFA2_tb <- pairs(emm_E1_lFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.0603 0.0810 21.8 0.745 0.4643
## bottom . faces - words 0.0956 0.0810 21.8 1.180 0.2507
## . faces top - bottom -0.1143 0.0612 16.4 -1.867 0.0799
## . words top - bottom -0.0791 0.0612 16.4 -1.291 0.2145
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA on the right-FFA2 betas.
anova_E1_rFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2, Label == label_FFA2[[2]]))
anova_E1_rFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 12 0.21 9.41 ** .10 .010
## 2 Layout 2.50, 30.03 0.03 9.05 *** .03 .0004
## 3 FaceWord:Layout 2.42, 29.01 0.03 3.43 * .02 .04
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for every FaceWord x Layout cell (right FFA2).
emm_aov_E1_rFFA2 <- emmeans(anova_E1_rFFA2, ~ FaceWord * Layout)
arrange(as.data.frame(emm_aov_E1_rFFA2), FaceWord)
Posthoc analysis for the main effects:
# Main effect of FaceWord: pairwise contrast averaged over Layout.
contrast(emmeans(emm_aov_E1_rFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words 0.275 0.0896 12 3.068 0.0098
##
## Results are averaged over the levels of: Layout
# Main effect of Layout: all pairwise contrasts (Tukey-adjusted by default).
contrast(emmeans(emm_aov_E1_rFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.18696 0.0411 36 4.553 0.0003
## intact - top 0.18061 0.0411 36 4.398 0.0005
## intact - bottom 0.14401 0.0411 36 3.507 0.0065
## exchange - top -0.00635 0.0411 36 -0.155 0.9987
## exchange - bottom -0.04295 0.0411 36 -1.046 0.7240
## top - bottom -0.03660 0.0411 36 -0.891 0.8094
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple effects: pairwise contrasts within each level of the other factor,
# uncorrected p-values (right FFA2).
contr_aov_E1_rFFA2 <- contrast(emm_aov_E1_rFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_rFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.44008 0.1050 21.6 4.189 0.0004
## exchange . faces - words 0.16375 0.1050 21.6 1.559 0.1335
## top . faces - words 0.25395 0.1050 21.6 2.417 0.0245
## bottom . faces - words 0.24186 0.1050 21.6 2.302 0.0313
## . faces intact - exchange 0.32512 0.0607 71.5 5.353 <.0001
## . faces intact - top 0.27367 0.0607 71.5 4.506 <.0001
## . faces intact - bottom 0.24312 0.0607 71.5 4.003 0.0002
## . faces exchange - top -0.05145 0.0607 71.5 -0.847 0.3998
## . faces exchange - bottom -0.08200 0.0607 71.5 -1.350 0.1812
## . faces top - bottom -0.03055 0.0607 71.5 -0.503 0.6165
## . words intact - exchange 0.04879 0.0607 71.5 0.803 0.4244
## . words intact - top 0.08754 0.0607 71.5 1.441 0.1539
## . words intact - bottom 0.04490 0.0607 71.5 0.739 0.4621
## . words exchange - top 0.03875 0.0607 71.5 0.638 0.5255
## . words exchange - bottom -0.00389 0.0607 71.5 -0.064 0.9491
## . words top - bottom -0.04264 0.0607 71.5 -0.702 0.4849
# 2 x 2 follow-up ANOVA (FaceWord x intact/exchange) for right FFA2.
anova_E1_rFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_rFFA2_ie, "pes") # report partial eta squared
emm_E1_rFFA2_ie <- emmeans(anova_E1_rFFA2_ie, ~ FaceWord + Layout)
(simple_E1_rFFA2_ie <- pairs(emm_E1_rFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words 0.4401 0.1052 15.5 4.184 0.0007
## exchange . faces - words 0.1638 0.1052 15.5 1.557 0.1397
## . faces intact - exchange 0.3251 0.0583 23.4 5.577 <.0001
## . words intact - exchange 0.0488 0.0583 23.4 0.837 0.4110
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 x 2 follow-up ANOVA (FaceWord x top/bottom) for right FFA2.
anova_E1_rFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("top", "bottom")))
anova(anova_E1_rFFA2_tb, "pes") # report partial eta squared
emm_E1_rFFA2_tb <- emmeans(anova_E1_rFFA2_tb, ~ FaceWord + Layout)
(simple_E1_rFFA2_tb <- pairs(emm_E1_rFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words 0.2539 0.1049 19.1 2.421 0.0256
## bottom . faces - words 0.2419 0.1049 19.1 2.306 0.0325
## . faces top - bottom -0.0306 0.0609 19.7 -0.501 0.6216
## . words top - bottom -0.0426 0.0609 19.7 -0.700 0.4922
# Stack the left/right EMMs, tag each row with its hemisphere, and plot the
# univariate FFA2 responses.
nRow_E1 <- nrow(as.data.frame(emm_aov_E1_lFFA2))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA2 <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_aov_E1_lFFA2), as.data.frame(emm_aov_E1_rFFA2))
)
plot_uni_E1_FFA2 <- plot_uni(desp_uni_E1_FFA2, contr_aov_E1_lFFA2, contr_aov_E1_rFFA2, "FFA2")
# ggsave('plot_uni_E1_FFA2.png', plot_uni_E1_FFA2, width = 10, height = 10)
plot_uni_E1_FFA2
The above figure shows the neural responses (beta values) in FFA2 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Hemisphere tags for the 2 x 2 intact/exchange EMMs (FFA2), then plot them.
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA2_ie))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA2_ie <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_E1_lFFA2_ie), as.data.frame(emm_E1_rFFA2_ie))
)
plot_uni_E1_FFA2_ie <- plot_uni(desp_uni_E1_FFA2_ie, simple_E1_lFFA2_ie, simple_E1_rFFA2_ie, "FFA2", FALSE)
# ggsave('plot_uni_E1_FFA2_ie.png', plot_uni_E1_FFA2_ie, width = 10, height = 5)
plot_uni_E1_FFA2_ie
# Hemisphere tags for the 2 x 2 top/bottom EMMs (FFA2), then plot them.
nRow_E1 <- nrow(as.data.frame(emm_E1_lFFA2_tb))
Hemisphere <- rep(c("left", "right"), each = nRow_E1)
desp_uni_E1_FFA2_tb <- cbind(
  Hemisphere,
  rbind(as.data.frame(emm_E1_lFFA2_tb), as.data.frame(emm_E1_rFFA2_tb))
)
plot_uni_E1_FFA2_tb <- plot_uni(desp_uni_E1_FFA2_tb, simple_E1_lFFA2_tb, simple_E1_rFFA2_tb, "FFA2", FALSE)
# ggsave('plot_uni_E1_FFA2_tb.png', plot_uni_E1_FFA2_tb, width = 10, height = 5)
plot_uni_E1_FFA2_tb
# One-tailed one-sample t-tests (H1: accuracy > chance, 0.5) for each
# hemisphere x classification pair in FFA2, then plot.
# Improvement: call t.test() once per group (the original repeated the same
# call nine times) and extract components by name rather than by position.
# Note: the "SD" column keeps its original content, which is t.test()$stderr
# (the standard error of the mean), not the standard deviation.
one_decode_agg_E1_FFA2 <- df_decode_E1_FFA2 %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize(tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr,
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            upper.CL = mean * 2 - lower.CL, # one-sided CI is (lower, Inf); mirror the lower bound
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_decode_agg_E1_FFA2
plot_decode_E1_FFA2 <- plot_decode(one_decode_agg_E1_FFA2, "FFA2")
# ggsave('plot_decode_E1_FFA2.png', plot_decode_E1_FFA2, width = 6.5, height = 16)
plot_decode_E1_FFA2
The above figure shows the decoding accuracy in FFA2 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Similarity of top + bottom to intact vs. exchange in FFA2: two-tailed
# one-sample t-tests (against 0.5) on the rate of the weighted top+bottom
# combinations being classified as "exchange", then plot.
# Improvement: call t.test() once per group and extract components by name
# (the original repeated the identical call nine times with positional [[n]]).
# Note: "SD" keeps its original content, t.test()$stderr (standard error).
one_simi_E1_FFA2 <- df_simi_E1_FFA2 %>%
  group_by(Hemisphere, Combination) %>%
  summarize(tt = list(t.test(RateAsExchange, mu = 0.5)),
            mean = tt[[1]]$estimate,
            SD = tt[[1]]$stderr,
            t = tt[[1]]$statistic,
            df = tt[[1]]$parameter,
            p = round(tt[[1]]$p.value, 5),
            lower.CL = tt[[1]]$conf.int[1],
            upper.CL = tt[[1]]$conf.int[2],
            nullValue = tt[[1]]$null.value,
            alternative = tt[[1]]$alternative) %>%
  select(-tt)
one_simi_E1_FFA2
plot_simi_E1_FFA2 <- plot_simi(one_simi_E1_FFA2, "FFA2")
# ggsave('plot_simi_E1_FFA2.png', plot_simi_E1_FFA2, width = 8, height = 10)
plot_simi_E1_FFA2
The above figure shows the probability of top+bottom being decoded as the exchange condition in FFA2. Patterns of top and bottom were combined with different weights, e.g., “face_top0.25-face_bottom0.75” denotes the linear combination of face_top and face_bottom with weights of 0.25/0.75. The numbers are the p-values for the two-tailed one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# Restrict each data set to the VWFA label (left hemisphere only).
df_uni_E1_VWFA <- df_clean_uni_E1 %>% filter(Label %in% label_VWFA)
df_decode_E1_VWFA <- df_decode_acc_E1 %>% filter(Label %in% label_VWFA)
df_simi_E1_VWFA <- df_rate_simi_E1 %>% filter(Label %in% label_VWFA)
# subjects used for each hemisphere
# unique(as.character((df_univar_agg_E1_VWFA %>% filter(Label == label_VWFA_E1))$SubjCode))
df_uni_E1_VWFA %>%
  distinct(Hemisphere, Label, SessCode) %>%
  group_by(Hemisphere, Label) %>%
  summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA on the VWFA betas.
anova_E1_VWFA <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_VWFA, Label == label_VWFA))
anova_E1_VWFA
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 17 0.21 100.25 *** .25 <.0001
## 2 Layout 2.53, 43.04 0.03 4.04 * .005 .02
## 3 FaceWord:Layout 2.57, 43.65 0.03 5.40 ** .005 .005
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
emm_aov_E1_VWFA <- emmeans(anova_E1_VWFA, ~ FaceWord * Layout)
emm_aov_E1_VWFA %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrasts for each main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E1_VWFA, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words -0.773 0.0772 17 -10.012 <.0001
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E1_VWFA, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.05205 0.0368 51 -1.413 0.4974
## intact - top 0.07544 0.0368 51 2.048 0.1844
## intact - bottom 0.00925 0.0368 51 0.251 0.9944
## exchange - top 0.12748 0.0368 51 3.460 0.0059
## exchange - bottom 0.06130 0.0368 51 1.664 0.3531
## top - bottom -0.06619 0.0368 51 -1.796 0.2868
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple-effect contrasts within each level of the other factor (no p adjustment)
contr_aov_E1_VWFA <- contrast(emm_aov_E1_VWFA, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_VWFA
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.69862 0.0881 27.9 -7.932 <.0001
## exchange . faces - words -0.90436 0.0881 27.9 -10.268 <.0001
## top . faces - words -0.65964 0.0881 27.9 -7.490 <.0001
## bottom . faces - words -0.82940 0.0881 27.9 -9.417 <.0001
## . faces intact - exchange 0.05082 0.0505 101.6 1.005 0.3171
## . faces intact - top 0.05595 0.0505 101.6 1.107 0.2710
## . faces intact - bottom 0.07464 0.0505 101.6 1.477 0.1429
## . faces exchange - top 0.00513 0.0505 101.6 0.101 0.9194
## . faces exchange - bottom 0.02382 0.0505 101.6 0.471 0.6385
## . faces top - bottom 0.01869 0.0505 101.6 0.370 0.7123
## . words intact - exchange -0.15491 0.0505 101.6 -3.065 0.0028
## . words intact - top 0.09493 0.0505 101.6 1.878 0.0633
## . words intact - bottom -0.05614 0.0505 101.6 -1.111 0.2694
## . words exchange - top 0.24984 0.0505 101.6 4.943 <.0001
## . words exchange - bottom 0.09878 0.0505 101.6 1.954 0.0534
## . words top - bottom -0.15106 0.0505 101.6 -2.989 0.0035
2 (face vs. word) × 2 (intact vs. exchange) ANOVA
# 2x2 ANOVA restricted to the intact and exchange layouts
# NOTE(review): this uses label_VWFA[[1]] while the omnibus ANOVA above used
# label_VWFA directly — presumably equivalent if label_VWFA has one element; confirm.
anova_E1_VWFA_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_VWFA_ie, "pes")
emm_E1_VWFA_ie <- emmeans(anova_E1_VWFA_ie, ~ FaceWord + Layout)
(simple_E1_VWFA_ie <- pairs(emm_E1_VWFA_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.6986 0.0872 21.1 -8.015 <.0001
## exchange . faces - words -0.9044 0.0872 21.1 -10.376 <.0001
## . faces intact - exchange 0.0508 0.0390 33.8 1.302 0.2017
## . words intact - exchange -0.1549 0.0390 33.8 -3.969 0.0004
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2x2 ANOVA restricted to the top and bottom layouts
anova_E1_VWFA_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_VWFA_tb, "pes")
emm_E1_VWFA_tb <- emmeans(anova_E1_VWFA_tb, ~ FaceWord + Layout)
(simple_E1_VWFA_tb <- pairs(emm_E1_VWFA_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words -0.6596 0.089 24.1 -7.413 <.0001
## bottom . faces - words -0.8294 0.089 24.1 -9.321 <.0001
## . faces top - bottom 0.0187 0.053 34.0 0.353 0.7263
## . words top - bottom -0.1511 0.053 34.0 -2.852 0.0073
# Build the plotting data frame: VWFA is left-hemisphere only,
# so every row of the EMM table is tagged "left"
nRow_E1 <-nrow(as.data.frame(emm_aov_E1_VWFA))
Hemisphere <- c(rep("left", nRow_E1))
desp_uni_E1_VWFA <- cbind(Hemisphere, as.data.frame(emm_aov_E1_VWFA))
plot_uni_E1_VWFA <- plot_uni_vwfa(desp_uni_E1_VWFA, contr_aov_E1_VWFA, "VWFA")
# ggsave('plot_uni_E1_VWFA.png', plot_uni_E1_VWFA, width = 5.5, height = 10)
plot_uni_E1_VWFA
The above figure shows the neural responses (beta values) in VWFA for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# Plots for the two 2x2 follow-up ANOVAs (intact/exchange, then top/bottom);
# the trailing FALSE argument presumably toggles a plot option — see plot_uni_vwfa()
nRow_E1 <-nrow(as.data.frame(emm_E1_VWFA_ie))
Hemisphere <- c(rep("left", nRow_E1))
desp_uni_E1_VWFA_ie <- cbind(Hemisphere, as.data.frame(emm_E1_VWFA_ie))
plot_uni_E1_VWFA_ie <- plot_uni_vwfa(desp_uni_E1_VWFA_ie, simple_E1_VWFA_ie, "VWFA", FALSE)
# ggsave('plot_uni_E1_VWFA_ie.png', plot_uni_E1_VWFA_ie, width = 5.5, height = 10)
plot_uni_E1_VWFA_ie
nRow_E1 <-nrow(as.data.frame(emm_E1_VWFA_tb))
Hemisphere <- c(rep("left", nRow_E1))
desp_uni_E1_VWFA_tb <- cbind(Hemisphere, as.data.frame(emm_E1_VWFA_tb))
plot_uni_E1_VWFA_tb <- plot_uni_vwfa(desp_uni_E1_VWFA_tb, simple_E1_VWFA_tb, "VWFA", FALSE)
# ggsave('plot_uni_E1_VWFA_tb.png', plot_uni_E1_VWFA_tb, width = 5.5, height = 10)
plot_uni_E1_VWFA_tb
# one-sample for results of decode E1 VWFA
# One-tailed (greater) one-sample t-tests of decoding accuracy against
# chance (0.5) for each Hemisphere x ClassifyPair.
one_decode_agg_E1_VWFA <- df_decode_E1_VWFA %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize({
    # Run t.test() once per group (the original recomputed it for every
    # column) and extract components by name instead of fragile positions.
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    est <- tt$estimate
    lo <- tt$conf.int[1]
    tibble(
      mean        = est,
      SD          = tt$stderr,  # NOTE(review): standard error of the mean, not SD
      t           = tt$statistic,
      df          = tt$parameter,
      p           = round(tt$p.value, 5),
      lower.CL    = lo,
      # The one-sided CI has an infinite upper bound; mirror the one-sided
      # 95% lower bound around the mean for symmetric plotting error bars
      # (mathematically this is the 90% two-sided upper limit).
      upper.CL    = est * 2 - lo,
      nullValue   = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E1_VWFA
plot_decode_E1_VWFA <- plot_decode_vwfa(one_decode_agg_E1_VWFA, "VWFA")
# ggsave('plot_decode_E1_VWFA.png', plot_decode_E1_VWFA, width = 4, height = 16)
plot_decode_E1_VWFA
The above figure shows the decoding accuracy in VWFA for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: ***, p <.001
# Similarity of top + bottom to intact vs. exchange in VWFA
# Two-sided one-sample t-tests of the "classified as exchange" probability
# against chance (0.5), per Hemisphere x weight Combination.
one_simi_E1_VWFA <- df_simi_E1_VWFA %>%
  group_by(Hemisphere, Combination) %>%
  summarize({
    # Run t.test() once per group (the original recomputed it for every
    # column) and extract components by name instead of fragile positions.
    tt <- t.test(RateAsExchange, mu = 0.5)
    tibble(
      mean        = tt$estimate,
      SD          = tt$stderr,  # NOTE(review): standard error of the mean, not SD
      t           = tt$statistic,
      df          = tt$parameter,
      p           = round(tt$p.value, 5),
      lower.CL    = tt$conf.int[1],
      upper.CL    = tt$conf.int[2],
      nullValue   = tt$null.value,
      alternative = tt$alternative
    )
  })
one_simi_E1_VWFA
plot_simi_E1_VWFA <- plot_simi_vwfa(one_simi_E1_VWFA, "VWFA")
# ggsave('plot_simi_E1_VWFA.png', plot_simi_E1_VWFA, width = 4.25, height = 10)
plot_simi_E1_VWFA
The above figure shows the probability of top+bottom being decoded as exchange conditions in VWFA. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# Subset the univariate, decoding, and similarity tables to the LO labels
df_uni_E1_LO <- df_clean_uni_E1 %>% filter(Label %in% label_LO)
df_decode_E1_LO <- df_decode_acc_E1 %>% filter(Label %in% label_LO)
df_simi_E1_LO <- df_rate_simi_E1 %>% filter(Label %in% label_LO)
# Count the sessions contributing to each Hemisphere x Label cell
df_uni_E1_LO %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA for left LO
# (label_LO[[1]] is the left-hemisphere label)
anova_E1_lLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO, Label == label_LO[[1]]))
anova_E1_lLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 18 0.22 22.93 *** .06 .0001
## 2 Layout 2.45, 44.18 0.04 3.92 * .005 .02
## 3 FaceWord:Layout 2.42, 43.48 0.03 0.40 .0004 .71
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells
emm_aov_E1_lLO <- emmeans(anova_E1_lLO, ~ FaceWord * Layout)
emm_aov_E1_lLO %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrasts for each main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E1_lLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words -0.362 0.0755 18 -4.789 0.0001
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E1_lLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.00662 0.0425 54 0.156 0.9986
## intact - top 0.12934 0.0425 54 3.043 0.0184
## intact - bottom 0.04907 0.0425 54 1.155 0.6577
## exchange - top 0.12272 0.0425 54 2.888 0.0277
## exchange - bottom 0.04245 0.0425 54 0.999 0.7507
## top - bottom -0.08027 0.0425 54 -1.889 0.2449
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple-effect contrasts within each level of the other factor (no p adjustment)
contr_aov_E1_lLO <- contrast(emm_aov_E1_lLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_lLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.32762 0.0872 30.9 -3.757 0.0007
## exchange . faces - words -0.34424 0.0872 30.9 -3.948 0.0004
## top . faces - words -0.37437 0.0872 30.9 -4.293 0.0002
## bottom . faces - words -0.39984 0.0872 30.9 -4.585 0.0001
## . faces intact - exchange 0.01493 0.0555 104.8 0.269 0.7883
## . faces intact - top 0.15271 0.0555 104.8 2.754 0.0070
## . faces intact - bottom 0.08517 0.0555 104.8 1.536 0.1276
## . faces exchange - top 0.13779 0.0555 104.8 2.484 0.0146
## . faces exchange - bottom 0.07025 0.0555 104.8 1.267 0.2081
## . faces top - bottom -0.06754 0.0555 104.8 -1.218 0.2260
## . words intact - exchange -0.00169 0.0555 104.8 -0.030 0.9757
## . words intact - top 0.10596 0.0555 104.8 1.911 0.0588
## . words intact - bottom 0.01296 0.0555 104.8 0.234 0.8157
## . words exchange - top 0.10765 0.0555 104.8 1.941 0.0549
## . words exchange - bottom 0.01465 0.0555 104.8 0.264 0.7922
## . words top - bottom -0.09300 0.0555 104.8 -1.677 0.0965
2 (face vs. word) × 2 (intact vs. exchange) ANOVA
# 2x2 ANOVA restricted to the intact and exchange layouts (left LO)
anova_E1_lLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_lLO_ie, "pes")
emm_E1_lLO_ie <- emmeans(anova_E1_lLO_ie, ~ FaceWord + Layout)
(simple_E1_lLO_ie <- pairs(emm_E1_lLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.32762 0.0955 22 -3.429 0.0024
## exchange . faces - words -0.34424 0.0955 22 -3.603 0.0016
## . faces intact - exchange 0.01493 0.0469 35 0.318 0.7520
## . words intact - exchange -0.00169 0.0469 35 -0.036 0.9714
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2x2 ANOVA restricted to the top and bottom layouts (left LO)
anova_E1_lLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[1]],
Layout %in% c("top", "bottom")))
anova(anova_E1_lLO_tb, "pes")
emm_E1_lLO_tb <- emmeans(anova_E1_lLO_tb, ~ FaceWord + Layout)
(simple_E1_lLO_tb <- pairs(emm_E1_lLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words -0.3744 0.078 23.2 -4.801 0.0001
## bottom . faces - words -0.3998 0.078 23.2 -5.128 <.0001
## . faces top - bottom -0.0675 0.054 29.6 -1.251 0.2206
## . words top - bottom -0.0930 0.054 29.6 -1.723 0.0953
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA for right LO
# (label_LO[[2]] is the right-hemisphere label)
aov_E1_rLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO, Label == label_LO[[2]]))
aov_E1_rLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 17 0.20 8.43 ** .02 .010
## 2 Layout 2.18, 37.07 0.07 1.47 .002 .24
## 3 FaceWord:Layout 2.63, 44.74 0.03 1.27 .001 .30
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells
emm_aov_E1_rLO <- emmeans(aov_E1_rLO, ~ FaceWord * Layout)
emm_aov_E1_rLO %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrasts for each main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E1_rLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## faces - words -0.217 0.0747 17 -2.904 0.0099
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E1_rLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.0825 0.0532 51 1.551 0.4156
## intact - top 0.1037 0.0532 51 1.948 0.2212
## intact - bottom 0.0449 0.0532 51 0.843 0.8339
## exchange - top 0.0212 0.0532 51 0.398 0.9785
## exchange - bottom -0.0377 0.0532 51 -0.708 0.8935
## top - bottom -0.0589 0.0532 51 -1.106 0.6877
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple-effect contrasts within each level of the other factor (no p adjustment)
contr_aov_E1_rLO <- contrast(emm_aov_E1_rLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E1, interaction = "pairwise") # , adjust = "none"
contr_aov_E1_rLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.14282 0.0893 32.6 -1.600 0.1193
## exchange . faces - words -0.29775 0.0893 32.6 -3.335 0.0021
## top . faces - words -0.21835 0.0893 32.6 -2.446 0.0200
## bottom . faces - words -0.20910 0.0893 32.6 -2.342 0.0254
## . faces intact - exchange 0.16001 0.0665 94.5 2.406 0.0181
## . faces intact - top 0.14149 0.0665 94.5 2.127 0.0360
## . faces intact - bottom 0.07800 0.0665 94.5 1.173 0.2439
## . faces exchange - top -0.01853 0.0665 94.5 -0.279 0.7812
## . faces exchange - bottom -0.08202 0.0665 94.5 -1.233 0.2206
## . faces top - bottom -0.06349 0.0665 94.5 -0.954 0.3423
## . words intact - exchange 0.00508 0.0665 94.5 0.076 0.9393
## . words intact - top 0.06595 0.0665 94.5 0.992 0.3240
## . words intact - bottom 0.01171 0.0665 94.5 0.176 0.8606
## . words exchange - top 0.06087 0.0665 94.5 0.915 0.3625
## . words exchange - bottom 0.00663 0.0665 94.5 0.100 0.9208
## . words top - bottom -0.05424 0.0665 94.5 -0.815 0.4169
# 2x2 ANOVA restricted to the intact and exchange layouts (right LO)
anova_E1_rLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E1_rLO_ie, "pes")
emm_E1_rLO_ie <- emmeans(anova_E1_rLO_ie, ~ FaceWord + Layout)
(simple_E1_rLO_ie <- pairs(emm_E1_rLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . faces - words -0.14282 0.0929 22.8 -1.537 0.1380
## exchange . faces - words -0.29775 0.0929 22.8 -3.205 0.0040
## . faces intact - exchange 0.16001 0.0573 32.6 2.794 0.0087
## . words intact - exchange 0.00508 0.0573 32.6 0.089 0.9299
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2x2 ANOVA restricted to the top and bottom layouts (right LO)
anova_E1_rLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E1_LO,
Label == label_LO[[2]],
Layout %in% c("top", "bottom")))
anova(anova_E1_rLO_tb, "pes")
emm_E1_rLO_tb <- emmeans(anova_E1_rLO_tb, ~ FaceWord + Layout)
(simple_E1_rLO_tb <- pairs(emm_E1_rLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## top . faces - words -0.2184 0.0855 26.4 -2.554 0.0167
## bottom . faces - words -0.2091 0.0855 26.4 -2.446 0.0214
## . faces top - bottom -0.0635 0.0614 33.7 -1.033 0.3088
## . words top - bottom -0.0542 0.0614 33.7 -0.883 0.3836
# add the column of Hemisphere
# LO exists bilaterally: stack the left and right EMM tables and tag rows
nRow_E1 <-nrow(as.data.frame(emm_aov_E1_lLO))
Hemisphere <- c(rep("left", nRow_E1), rep("right", nRow_E1))
desp_uni_E1_LO <- cbind(Hemisphere, rbind(as.data.frame(emm_aov_E1_lLO), as.data.frame(emm_aov_E1_rLO)))
plot_uni_E1_LO <- plot_uni(desp_uni_E1_LO, contr_aov_E1_lLO, contr_aov_E1_rLO, "LO")
# ggsave('plot_uni_E1_LO.png', plot_uni_E1_LO, width = 10, height = 10)
plot_uni_E1_LO
The above figure shows the neural responses (beta values) in LO for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# add the column of Hemisphere
# Plots for the two 2x2 follow-up ANOVAs, left and right LO stacked
# NOTE(review): the trailing F should be spelled out as FALSE (T/F are reassignable)
nRow_E1 <-nrow(as.data.frame(emm_E1_lLO_ie))
Hemisphere <- c(rep("left", nRow_E1), rep("right", nRow_E1))
desp_uni_E1_LO_ie <- cbind(Hemisphere, rbind(as.data.frame(emm_E1_lLO_ie), as.data.frame(emm_E1_rLO_ie)))
plot_uni_E1_LO_ie <- plot_uni(desp_uni_E1_LO_ie, simple_E1_lLO_ie, simple_E1_rLO_ie, "LO", F)
# ggsave('plot_uni_E1_LO_ie.png', plot_uni_E1_LO_ie, width = 10, height = 5)
plot_uni_E1_LO_ie
# add the column of Hemisphere
nRow_E1 <-nrow(as.data.frame(emm_E1_lLO_tb))
Hemisphere <- c(rep("left", nRow_E1), rep("right", nRow_E1))
desp_uni_E1_LO_tb <- cbind(Hemisphere, rbind(as.data.frame(emm_E1_lLO_tb), as.data.frame(emm_E1_rLO_tb)))
plot_uni_E1_LO_tb <- plot_uni(desp_uni_E1_LO_tb, simple_E1_lLO_tb, simple_E1_rLO_tb, "LO", F)
# ggsave('plot_uni_E1_LO_tb.png', plot_uni_E1_LO_tb, width = 10, height = 5)
plot_uni_E1_LO_tb
# one-sample for results of decode E1 LO
# One-tailed (greater) one-sample t-tests of decoding accuracy against
# chance (0.5) for each Hemisphere x ClassifyPair.
one_decode_agg_E1_LO <- df_decode_E1_LO %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize({
    # Run t.test() once per group (the original recomputed it for every
    # column) and extract components by name instead of fragile positions.
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    est <- tt$estimate
    lo <- tt$conf.int[1]
    tibble(
      mean        = est,
      SD          = tt$stderr,  # NOTE(review): standard error of the mean, not SD
      t           = tt$statistic,
      df          = tt$parameter,
      p           = round(tt$p.value, 5),
      lower.CL    = lo,
      # The one-sided CI has an infinite upper bound; mirror the one-sided
      # 95% lower bound around the mean for symmetric plotting error bars
      # (mathematically this is the 90% two-sided upper limit).
      upper.CL    = est * 2 - lo,
      nullValue   = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E1_LO
plot_decode_E1_LO <- plot_decode(one_decode_agg_E1_LO, "LO")
# ggsave('plot_decode_E1_LO.png', plot_decode_E1_LO, width = 6.5, height = 16)
plot_decode_E1_LO
The above figure shows the decoding accuracy in LO for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: **, p < .01; ***, p < .001
# Similarity of top + bottom to intact vs. exchange in LO
# Two-sided one-sample t-tests of the "classified as exchange" probability
# against chance (0.5), per Hemisphere x weight Combination.
one_simi_E1_LO <- df_simi_E1_LO %>%
  group_by(Hemisphere, Combination) %>%
  summarize({
    # Run t.test() once per group (the original recomputed it for every
    # column) and extract components by name instead of fragile positions.
    tt <- t.test(RateAsExchange, mu = 0.5)
    tibble(
      mean        = tt$estimate,
      SD          = tt$stderr,  # NOTE(review): standard error of the mean, not SD
      t           = tt$statistic,
      df          = tt$parameter,
      p           = round(tt$p.value, 5),
      lower.CL    = tt$conf.int[1],
      upper.CL    = tt$conf.int[2],
      nullValue   = tt$null.value,
      alternative = tt$alternative
    )
  })
one_simi_E1_LO
plot_simi_E1_LO <- plot_simi(one_simi_E1_LO, "LO")
# ggsave('plot_simi_E1_LO.png', plot_simi_E1_LO, width = 8, height = 10)
plot_simi_E1_LO
The above figure shows the probability of top+bottom being decoded as exchange conditions in LO. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# Load the Experiment 2 ROI label table and derive a short roi name plus
# a Subject id (session suffix stripped from SubjCode)
# NOTE(review): str_remove patterns are regexes, so the unescaped dots in
# "roi." and ".label" match any character — harmless for these label names,
# but "\\.label" would be stricter.
df_label_E2 <- read_csv(file.path("data", "faceword_E2_Label_HJ.csv")) %>%
mutate(roi = str_remove(Label, "roi."),
roi = str_remove(roi, ".label")) %>%
mutate(Subject = str_replace(SubjCode, "\\_.*", ""))
# df_label %>% head()
# One row per participant, one column per ROI, cell = label size
df_label_E2 %>%
select(SubjCode, roi, Size) %>%
pivot_wider(names_from = roi, values_from = Size) %>%
arrange(SubjCode)
The above table displays the size (in mm2) of each label for each participant. (NA denotes that this label is not available for that participant.)
# One row per participant, one column per ROI, cell = number of vertices
df_label_E2 %>%
select(SubjCode, roi, NVtxs) %>%
pivot_wider(names_from = roi, values_from = NVtxs) %>%
arrange(SubjCode)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# Per-label summary over all participants
df_label_E2 %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
meanNVtx = mean(NVtxs))
# Same summary after applying the minimum-size inclusion criterion
df_nlabel_E2 <- df_label_E2 %>%
filter(Size > nVtx_size_min) %>%
group_by(Label, roi) %>%
summarize(Count = n(),
meanSize = mean(Size),
meanNVtx = mean(NVtxs))
df_nlabel_E2
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# load data file from functional scans for univariate analysis
df_uni_E2 <- read_csv(file.path("data", "faceword_E2_Uni_HJ.csv"))
head(df_uni_E2)
# Clean-up: drop missing responses, split Condition into the two IVs,
# recode top/bottom as partA/partB (E2 parts differ between scripts),
# attach label sizes, and apply the minimum-size inclusion criterion
df_clean_uni_E2 <- {
df_uni_E2 %>%
filter(Response != "NaN") %>%
separate(Condition, c("FaceWord", "Layout"), "_") %>% # separate the conditions into two IVs
mutate(Layout_ = factor(Layout, levels = layout_order), # convert the two IVs to factors
Hemisphere = if_else(grepl("lh", Label), "left", if_else(grepl("rh", Label), "right", "NA")),
Layout = fct_recode(Layout_, partA = "top", partB = "bottom")) %>% # rename top and bottom as partA and partB
select(Hemisphere, Label, SessCode, FaceWord, Layout, Response) %>%
mutate(Subject = str_replace(SessCode, "\\_.*", "")) %>%
left_join(df_label_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
}
head(df_clean_uni_E2)
# Display order of the E2 classification pairs
pair_order_E2 <- c("English_intact-Chinese_intact",
"English_intact-English_exchange",
"English_partA-English_partB", # English_top-English_bottom
"Chinese_intact-Chinese_exchange",
"Chinese_partA-Chinese_partB") # Chinese_top-Chinese_bottom
df_decode_E2 <- read_csv(file.path("data", "faceword_E2_Decode_noz.csv"))
head(df_decode_E2)
# Recode the raw decoding results: tag hemisphere, rename top/bottom pairs
# to partA/partB, attach label sizes, and apply the inclusion criterion
df_clean_decode_E2 <- df_decode_E2 %>%
select(Label, SessCode, ClassifyPair, ACC) %>%
mutate(Hemisphere = if_else(grepl("lh", Label), "left",
if_else(grepl("rh", Label), "right", "NA")),
Subject = str_remove(SessCode, "\\_.*"),
ClassifyPair = fct_recode(ClassifyPair,
`Chinese_partA-Chinese_partB` = "Chinese_top-Chinese_bottom",
`English_partA-English_partB` = "English_top-English_bottom"),
ClassifyPair = factor(ClassifyPair, levels = pair_order_E2)) %>%
left_join(df_label_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# Mean accuracy per session x label x pair (averaged over cross-validation folds)
df_decode_acc_E2 <- df_clean_decode_E2 %>%
group_by(Hemisphere, Label, SessCode, ClassifyPair) %>% # divide the data into groups by these columns
summarize(Accuracy = mean(ACC), Count = n()) %>%
ungroup()
df_decode_acc_E2
# Display order of the E2 weighted part combinations
simi_order_E2 <- c("English_partA0.25-English_partB0.75",
"English_partA0.50-English_partB0.50",
"English_partA0.75-English_partB0.25",
"Chinese_partA0.25-Chinese_partB0.75",
"Chinese_partA0.50-Chinese_partB0.50",
"Chinese_partA0.75-Chinese_partB0.25")
df_simi_E2 <- read_csv(file.path("data", "faceword_E2_Similarity_noz.csv"))
head(df_simi_E2)
# Clean-up: derive the binary and probability "classified as exchange"
# measures, rename top/bottom to partA/partB, and apply the inclusion criterion
df_clean_simi_E2 <- df_simi_E2 %>%
mutate(asExchange = if_else(grepl("exchange", PredictCond), 1, 0), # binary prediction
pExchange = Probability_2, # probability prediction
Subject = str_remove(SessCode, "\\_.*"),
Combination = gsub("top", "partA", Combination),
Combination = gsub("bottom", "partB", Combination),
Combination = factor(Combination, levels = simi_order_E2)) %>%
left_join(df_label_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# Average within session x label x pair x combination; RateAsExchange is the
# probability-based measure (the binary one is kept for reference)
df_rate_simi_E2 <- df_clean_simi_E2 %>%
group_by(SessCode, Label, ClassPair_1, Combination) %>%
summarize(binaryAsExchange = mean(asExchange),
pAsExchange = mean(pExchange),
RateAsExchange = pAsExchange) %>% # use the probability instead of the categorical prediction
ungroup() %>%
mutate(Hemisphere = if_else(grepl("lh", Label), 'left', if_else(grepl("rh", Label), "right", "NA")))
head(df_rate_simi_E2)
# Subset the univariate, decoding, and similarity tables to the FFA1 labels
df_uni_E2_FFA1 <- df_clean_uni_E2 %>% filter(Label %in% label_FFA1)
df_decode_E2_FFA1 <- df_decode_acc_E2 %>% filter(Label %in% label_FFA1)
df_simi_E2_FFA1 <- df_rate_simi_E2 %>% filter(Label %in% label_FFA1)
# Count the sessions contributing to each Hemisphere x Label cell
df_uni_E2_FFA1 %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord: English/Chinese) x 4 (Layout) repeated-measures ANOVA for
# left FFA1 (label_FFA1[[1]] is the left-hemisphere label)
anova_E2_lFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1, Label == label_FFA1[[1]]))
anova_E2_lFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 11 0.22 12.53 ** .10 .005
## 2 Layout 1.73, 19.00 0.03 3.34 + .007 .06
## 3 FaceWord:Layout 2.25, 24.78 0.04 4.10 * .01 .03
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells
emm_aov_E2_lFFA1 <- emmeans(anova_E2_lFFA1, ~ FaceWord * Layout)
emm_aov_E2_lFFA1 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrasts for each main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E2_lFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.338 0.0954 11 3.539 0.0046
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E2_lFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.10656 0.0394 33 -2.706 0.0499
## intact - partA 0.00125 0.0394 33 0.032 1.0000
## intact - partB -0.04698 0.0394 33 -1.193 0.6354
## exchange - partA 0.10781 0.0394 33 2.738 0.0464
## exchange - partB 0.05958 0.0394 33 1.513 0.4414
## partA - partB -0.04823 0.0394 33 -1.225 0.6159
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple-effect contrasts within each level of the other factor (no p adjustment)
contr_aov_E2_lFFA1 <- contrast(emm_aov_E2_lFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_lFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.34336 0.1134 20.8 3.027 0.0065
## exchange . English - Chinese 0.44662 0.1134 20.8 3.937 0.0008
## partA . English - Chinese 0.13346 0.1134 20.8 1.176 0.2527
## partB . English - Chinese 0.42759 0.1134 20.8 3.769 0.0011
## . English intact - exchange -0.15819 0.0637 62.5 -2.483 0.0157
## . English intact - partA 0.10620 0.0637 62.5 1.667 0.1005
## . English intact - partB -0.08909 0.0637 62.5 -1.398 0.1669
## . English exchange - partA 0.26440 0.0637 62.5 4.150 0.0001
## . English exchange - partB 0.06910 0.0637 62.5 1.085 0.2823
## . English partA - partB -0.19530 0.0637 62.5 -3.065 0.0032
## . Chinese intact - exchange -0.05493 0.0637 62.5 -0.862 0.3919
## . Chinese intact - partA -0.10370 0.0637 62.5 -1.628 0.1086
## . Chinese intact - partB -0.00486 0.0637 62.5 -0.076 0.9394
## . Chinese exchange - partA -0.04877 0.0637 62.5 -0.765 0.4469
## . Chinese exchange - partB 0.05007 0.0637 62.5 0.786 0.4349
## . Chinese partA - partB 0.09884 0.0637 62.5 1.551 0.1259
2 (English vs. Chinese) × 2 (intact vs. exchange) ANOVA
# 2x2 ANOVA restricted to the intact and exchange layouts (left FFA1)
anova_E2_lFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_lFFA1_ie, "pes")
emm_E2_lFFA1_ie <- emmeans(anova_E2_lFFA1_ie, ~ FaceWord + Layout)
(simple_E2_lFFA1_ie <- pairs(emm_E2_lFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.3434 0.1178 16.8 2.915 0.0098
## exchange . English - Chinese 0.4466 0.1178 16.8 3.791 0.0015
## . English intact - exchange -0.1582 0.0741 21.7 -2.134 0.0444
## . Chinese intact - exchange -0.0549 0.0741 21.7 -0.741 0.4666
2 (English vs. Chinese) × 2 (partA vs. partB) ANOVA
# 2x2 ANOVA restricted to the partA and partB layouts (left FFA1)
anova_E2_lFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[1]],
Layout %in% c("partA", "partB")))
anova(anova_E2_lFFA1_tb, "pes")
emm_E2_lFFA1_tb <- emmeans(anova_E2_lFFA1_tb, ~ FaceWord + Layout)
(simple_E2_lFFA1_tb <- pairs(emm_E2_lFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese 0.1335 0.1089 18.6 1.225 0.2358
## partB . English - Chinese 0.4276 0.1089 18.6 3.926 0.0009
## . English partA - partB -0.1953 0.0647 16.0 -3.019 0.0082
## . Chinese partA - partB 0.0988 0.0647 16.0 1.528 0.1461
# 2 (FaceWord) x 4 (Layout) repeated-measures ANOVA for right FFA1
# (label_FFA1[[2]] is the right-hemisphere label)
anova_E2_rFFA1 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1, Label == label_FFA1[[2]]))
anova_E2_rFFA1
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 14 0.13 0.65 .008 .43
## 2 Layout 2.74, 38.34 0.03 2.08 .02 .12
## 3 FaceWord:Layout 2.18, 30.55 0.03 1.39 .009 .26
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells
emm_aov_E2_rFFA1 <- emmeans(anova_E2_rFFA1, ~ FaceWord * Layout)
emm_aov_E2_rFFA1 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Pairwise contrasts for each main effect (Tukey-adjusted by default)
contrast(emmeans(emm_aov_E2_rFFA1, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese -0.0522 0.0648 14 -0.805 0.4341
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E2_rFFA1, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.0605 0.0406 42 1.490 0.4522
## intact - partA -0.0299 0.0406 42 -0.738 0.8814
## intact - partB 0.0445 0.0406 42 1.098 0.6929
## exchange - partA -0.0904 0.0406 42 -2.228 0.1323
## exchange - partB -0.0159 0.0406 42 -0.393 0.9792
## partA - partB 0.0745 0.0406 42 1.835 0.2716
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Simple-effect contrasts within each level of the other factor (no p adjustment)
contr_aov_E2_rFFA1 <- contrast(emm_aov_E2_rFFA1, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_rFFA1
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.00632 0.0794 29.1 0.080 0.9371
## exchange . English - Chinese -0.13621 0.0794 29.1 -1.716 0.0969
## partA . English - Chinese -0.06054 0.0794 29.1 -0.763 0.4519
## partB . English - Chinese -0.01847 0.0794 29.1 -0.233 0.8177
## . English intact - exchange 0.13175 0.0552 83.5 2.387 0.0192
## . English intact - partA 0.00350 0.0552 83.5 0.063 0.9496
## . English intact - partB 0.05694 0.0552 83.5 1.032 0.3052
## . English exchange - partA -0.12825 0.0552 83.5 -2.324 0.0226
## . English exchange - partB -0.07481 0.0552 83.5 -1.355 0.1789
## . English partA - partB 0.05344 0.0552 83.5 0.968 0.3357
## . Chinese intact - exchange -0.01078 0.0552 83.5 -0.195 0.8456
## . Chinese intact - partA -0.06337 0.0552 83.5 -1.148 0.2542
## . Chinese intact - partB 0.03215 0.0552 83.5 0.582 0.5618
## . Chinese exchange - partA -0.05258 0.0552 83.5 -0.953 0.3434
## . Chinese exchange - partB 0.04293 0.0552 83.5 0.778 0.4389
## . Chinese partA - partB 0.09551 0.0552 83.5 1.731 0.0872
# 2x2 ANOVA restricted to the intact and exchange layouts (right FFA1)
anova_E2_rFFA1_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_rFFA1_ie, "pes")
emm_E2_rFFA1_ie <- emmeans(anova_E2_rFFA1_ie, ~ FaceWord + Layout)
(simple_E2_rFFA1_ie <- pairs(emm_E2_rFFA1_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.00632 0.0835 17.6 0.076 0.9405
## exchange . English - Chinese -0.13621 0.0835 17.6 -1.631 0.1206
## . English intact - exchange 0.13175 0.0535 23.4 2.463 0.0215
## . Chinese intact - exchange -0.01078 0.0535 23.4 -0.202 0.8420
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA/top vs. partB/bottom) within-subject
# ANOVA, right FFA1 only; "pes" requests partial eta squared.
anova_E2_rFFA1_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA1,
Label == label_FFA1[[2]],
Layout %in% c("partA", "partB")))
anova(anova_E2_rFFA1_tb, "pes")
# Uncorrected simple effects for the 2x2 (top/bottom) design.
emm_E2_rFFA1_tb <- emmeans(anova_E2_rFFA1_tb, ~ FaceWord + Layout)
(simple_E2_rFFA1_tb <- pairs(emm_E2_rFFA1_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.0605 0.0751 23.5 -0.806 0.4280
## partB . English - Chinese -0.0185 0.0751 23.5 -0.246 0.8078
## . English partA - partB 0.0534 0.0589 27.8 0.907 0.3723
## . Chinese partA - partB 0.0955 0.0589 27.8 1.621 0.1164
# add the column of Hemisphere
# Stack left- then right-hemisphere estimated marginal means and plot the
# condition means (beta values) for FFA1 (full 2x4 design).
nRow_E2 <-nrow(as.data.frame(emm_aov_E2_lFFA1))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA1 <- cbind(Hemisphere, rbind(as.data.frame(emm_aov_E2_lFFA1), as.data.frame(emm_aov_E2_rFFA1)))
plot_uni_E2_FFA1 <- plot_uni(desp_uni_E2_FFA1, contr_aov_E2_lFFA1, contr_aov_E2_rFFA1, "FFA1")
# ggsave('plot_uni_E2_FFA1.png', plot_uni_E2_FFA1, width = 10, height = 10)
plot_uni_E2_FFA1
The above figure shows the neural responses (beta values) in FFA1 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# add the column of Hemisphere
# Plot the intact-vs-exchange (2x2) estimated marginal means for FFA1,
# combining both hemispheres.
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA1_ie))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA1_ie <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA1_ie), as.data.frame(emm_E2_rFFA1_ie)))
plot_uni_E2_FFA1_ie <- plot_uni(desp_uni_E2_FFA1_ie, simple_E2_lFFA1_ie, simple_E2_rFFA1_ie, "FFA1", F)
# ggsave('plot_uni_E2_FFA1_ie.png', plot_uni_E2_FFA1_ie, width = 10, height = 5)
plot_uni_E2_FFA1_ie
# add the column of Hemisphere
# Same plot for the top-vs-bottom (partA/partB) 2x2 design.
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA1_tb))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA1_tb <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA1_tb), as.data.frame(emm_E2_rFFA1_tb)))
plot_uni_E2_FFA1_tb <- plot_uni(desp_uni_E2_FFA1_tb, simple_E2_lFFA1_tb, simple_E2_rFFA1_tb, "FFA1", F, T)
# ggsave('plot_uni_E2_FFA1_tb.png', plot_uni_E2_FFA1_tb, width = 10, height = 5)
plot_uni_E2_FFA1_tb
# one-sample for results of decode E2 FFA1
# One-tailed ("greater") one-sample t-tests of decoding accuracy against
# chance (0.5), per Hemisphere x ClassifyPair, in FFA1 (Experiment 2).
# The t-test is run once per group and its components extracted, instead
# of re-running the identical test for every summary column.
one_decode_agg_E2_FFA1 <- {
  df_decode_E2_FFA1 %>%
    mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
    group_by(Hemisphere, ClassifyPair) %>%
    summarize(.tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
              mean = .tt[[1]][["estimate"]],
              # NOTE(review): this component is the standard error of the
              # mean ("stderr"), not the SD; column name kept for
              # backward compatibility with downstream plotting code.
              SD = .tt[[1]][["stderr"]],
              t = .tt[[1]][["statistic"]],
              df = .tt[[1]][["parameter"]],
              p = round(.tt[[1]][["p.value"]], 5),
              lower.CL = .tt[[1]][["conf.int"]][1],
              # the one-sided CI has an infinite upper bound; mirror the
              # lower bound around the mean for plotting instead
              upper.CL = mean * 2 - lower.CL,
              nullValue = .tt[[1]][["null.value"]],
              alternative = .tt[[1]][["alternative"]]
              ) %>%
    select(-.tt)
}
one_decode_agg_E2_FFA1
plot_decode_E2_FFA1 <- plot_decode(one_decode_agg_E2_FFA1, "FFA1")
# ggsave('plot_decode_E2_FFA1.png', plot_decode_E2_FFA1, width = 6.5, height = 16)
plot_decode_E2_FFA1
The above figure shows the decoding accuracy in FFA1 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Two-tailed one-sample t-tests of the probability that combined
# top+bottom patterns are classified as "exchange", against 0.5, per
# Hemisphere x Combination in FFA1 (E2). The t-test is computed once per
# group and its components extracted (avoids 9 redundant identical calls).
one_simi_E2_FFA1 <- {
  df_simi_E2_FFA1 %>%
    group_by(Hemisphere, Combination) %>%
    summarize(.tt = list(t.test(RateAsExchange, mu = 0.5)),
              mean = .tt[[1]][["estimate"]],
              # NOTE(review): "stderr" component, i.e. the SEM, not the SD;
              # column name kept for backward compatibility.
              SD = .tt[[1]][["stderr"]],
              t = .tt[[1]][["statistic"]],
              df = .tt[[1]][["parameter"]],
              p = round(.tt[[1]][["p.value"]], 5),
              lower.CL = .tt[[1]][["conf.int"]][1],
              upper.CL = .tt[[1]][["conf.int"]][2],
              nullValue = .tt[[1]][["null.value"]],
              alternative = .tt[[1]][["alternative"]]
              ) %>%
    select(-.tt)
}
one_simi_E2_FFA1
plot_simi_E2_FFA1 <- plot_simi(one_simi_E2_FFA1, "FFA1")
# ggsave('plot_simi_E2_FFA1.png', plot_simi_E2_FFA1, width = 8, height = 10)
plot_simi_E2_FFA1
The above figure shows the probability of top+bottom being decoded as exchange conditions in FFA1. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
# Subset the E2 univariate, decoding, and similarity data to the FFA2
# labels (left and right hemisphere).
df_uni_E2_FFA2 <- filter(df_clean_uni_E2, Label %in% label_FFA2)
df_decode_E2_FFA2 <- filter(df_decode_acc_E2, Label %in% label_FFA2)
df_simi_E2_FFA2 <- filter(df_rate_simi_E2, Label %in% label_FFA2)
# Count the number of sessions per hemisphere/label.
df_uni_E2_FFA2 %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) within-subject ANOVA, left FFA2
# (label_FFA2[[1]]).
anova_E2_lFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2, Label == label_FFA2[[1]]))
anova_E2_lFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 12 0.18 8.65 * .08 .01
## 2 Layout 2.52, 30.24 0.02 0.84 .002 .47
## 3 FaceWord:Layout 2.56, 30.70 0.03 2.83 + .01 .06
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells.
emm_aov_E2_lFFA2 <- emmeans(anova_E2_lFFA2, ~ FaceWord * Layout)
emm_aov_E2_lFFA2 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
contrast(emmeans(emm_aov_E2_lFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.247 0.084 12 2.940 0.0124
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E2_lFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.01909 0.0343 36 0.556 0.9442
## intact - partA 0.02635 0.0343 36 0.768 0.8683
## intact - partB -0.02327 0.0343 36 -0.678 0.9047
## exchange - partA 0.00726 0.0343 36 0.212 0.9966
## exchange - partB -0.04236 0.0343 36 -1.234 0.6095
## partA - partB -0.04962 0.0343 36 -1.446 0.4799
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple-effect contrasts within each Layout and each
# FaceWord level (left FFA2).
contr_aov_E2_lFFA2 <- contrast(emm_aov_E2_lFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_lFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.3426 0.1002 23.0 3.418 0.0024
## exchange . English - Chinese 0.2466 0.1002 23.0 2.460 0.0218
## partA . English - Chinese 0.0986 0.1002 23.0 0.984 0.3355
## partB . English - Chinese 0.2999 0.1002 23.0 2.992 0.0065
## . English intact - exchange 0.0671 0.0564 67.5 1.191 0.2379
## . English intact - partA 0.1484 0.0564 67.5 2.633 0.0105
## . English intact - partB -0.0019 0.0564 67.5 -0.034 0.9732
## . English exchange - partA 0.0813 0.0564 67.5 1.442 0.1539
## . English exchange - partB -0.0690 0.0564 67.5 -1.224 0.2250
## . English partA - partB -0.1503 0.0564 67.5 -2.666 0.0096
## . Chinese intact - exchange -0.0289 0.0564 67.5 -0.513 0.6096
## . Chinese intact - partA -0.0957 0.0564 67.5 -1.697 0.0942
## . Chinese intact - partB -0.0446 0.0564 67.5 -0.792 0.4311
## . Chinese exchange - partA -0.0667 0.0564 67.5 -1.184 0.2404
## . Chinese exchange - partB -0.0157 0.0564 67.5 -0.279 0.7811
## . Chinese partA - partB 0.0510 0.0564 67.5 0.905 0.3685
2 (face vs. word) × 2 (intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) within-subject ANOVA,
# left FFA2; "pes" requests partial eta squared.
anova_E2_lFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_lFFA2_ie, "pes")
# Uncorrected simple effects for the 2x2 (intact/exchange) design.
emm_E2_lFFA2_ie <- emmeans(anova_E2_lFFA2_ie, ~ FaceWord + Layout)
(simple_E2_lFFA2_ie <- pairs(emm_E2_lFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.3426 0.1014 19.6 3.379 0.0030
## exchange . English - Chinese 0.2466 0.1014 19.6 2.432 0.0247
## . English intact - exchange 0.0671 0.0607 19.7 1.105 0.2825
## . Chinese intact - exchange -0.0289 0.0607 19.7 -0.476 0.6392
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA/top vs. partB/bottom) within-subject
# ANOVA, left FFA2.
anova_E2_lFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[1]],
Layout %in% c("partA", "partB")))
anova(anova_E2_lFFA2_tb, "pes")
# Uncorrected simple effects for the 2x2 (top/bottom) design.
emm_E2_lFFA2_tb <- emmeans(anova_E2_lFFA2_tb, ~ FaceWord + Layout)
(simple_E2_lFFA2_tb <- pairs(emm_E2_lFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese 0.0986 0.0991 19.0 0.995 0.3321
## partB . English - Chinese 0.2999 0.0991 19.0 3.027 0.0069
## . English partA - partB -0.1503 0.0643 23.4 -2.336 0.0284
## . Chinese partA - partB 0.0510 0.0643 23.4 0.793 0.4357
# 2 (FaceWord) x 4 (Layout) within-subject ANOVA, right FFA2
# (label_FFA2[[2]]).
anova_E2_rFFA2 <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2, Label == label_FFA2[[2]]))
anova_E2_rFFA2
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 17 0.05 0.00 <.0001 .99
## 2 Layout 2.55, 43.43 0.01 0.23 .0009 .85
## 3 FaceWord:Layout 2.28, 38.70 0.02 0.72 .005 .51
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells (right FFA2).
emm_aov_E2_rFFA2 <- emmeans(anova_E2_rFFA2, ~ FaceWord * Layout)
emm_aov_E2_rFFA2 %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Post-hoc comparisons of the main effects (right FFA2): FaceWord, then
# Layout (Tukey-adjusted).
contrast(emmeans(emm_aov_E2_rFFA2, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.000317 0.0355 17 0.009 0.9930
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E2_rFFA2, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.01582 0.0232 51 0.682 0.9034
## intact - partA -0.00143 0.0232 51 -0.062 0.9999
## intact - partB 0.00305 0.0232 51 0.131 0.9992
## exchange - partA -0.01725 0.0232 51 -0.744 0.8789
## exchange - partB -0.01277 0.0232 51 -0.551 0.9459
## partA - partB 0.00447 0.0232 51 0.193 0.9974
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple-effect contrasts within each Layout and each
# FaceWord level (right FFA2).
contr_aov_E2_rFFA2 <- contrast(emm_aov_E2_rFFA2, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_anova_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_rFFA2
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.04175 0.0529 56.1 0.789 0.4335
## exchange . English - Chinese -0.00244 0.0529 56.1 -0.046 0.9634
## partA . English - Chinese -0.05021 0.0529 56.1 -0.949 0.3468
## partB . English - Chinese 0.01216 0.0529 56.1 0.230 0.8190
## . English intact - exchange 0.03791 0.0396 92.9 0.958 0.3403
## . English intact - partA 0.04455 0.0396 92.9 1.126 0.2630
## . English intact - partB 0.01784 0.0396 92.9 0.451 0.6531
## . English exchange - partA 0.00664 0.0396 92.9 0.168 0.8671
## . English exchange - partB -0.02007 0.0396 92.9 -0.507 0.6130
## . English partA - partB -0.02671 0.0396 92.9 -0.675 0.5012
## . Chinese intact - exchange -0.00628 0.0396 92.9 -0.159 0.8743
## . Chinese intact - partA -0.04741 0.0396 92.9 -1.198 0.2338
## . Chinese intact - partB -0.01175 0.0396 92.9 -0.297 0.7672
## . Chinese exchange - partA -0.04113 0.0396 92.9 -1.040 0.3011
## . Chinese exchange - partB -0.00547 0.0396 92.9 -0.138 0.8903
## . Chinese partA - partB 0.03566 0.0396 92.9 0.901 0.3697
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) within-subject ANOVA,
# right FFA2; "pes" requests partial eta squared.
anova_E2_rFFA2_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_rFFA2_ie, "pes")
# Uncorrected simple effects for the 2x2 (intact/exchange) design.
emm_E2_rFFA2_ie <- emmeans(anova_E2_rFFA2_ie, ~ FaceWord + Layout)
(simple_E2_rFFA2_ie <- pairs(emm_E2_rFFA2_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.04175 0.0560 28.9 0.746 0.4619
## exchange . English - Chinese -0.00244 0.0560 28.9 -0.044 0.9656
## . English intact - exchange 0.03791 0.0409 33.8 0.927 0.3607
## . Chinese intact - exchange -0.00628 0.0409 33.8 -0.153 0.8790
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA/top vs. partB/bottom) within-subject
# ANOVA, right FFA2.
anova_E2_rFFA2_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_FFA2,
Label == label_FFA2[[2]],
Layout %in% c("partA", "partB")))
anova(anova_E2_rFFA2_tb, "pes")
# Uncorrected simple effects for the 2x2 (top/bottom) design.
emm_E2_rFFA2_tb <- emmeans(anova_E2_rFFA2_tb, ~ FaceWord + Layout)
(simple_E2_rFFA2_tb <- pairs(emm_E2_rFFA2_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.0502 0.0497 33.5 -1.011 0.3192
## partB . English - Chinese 0.0122 0.0497 33.5 0.245 0.8080
## . English partA - partB -0.0267 0.0420 25.9 -0.635 0.5308
## . Chinese partA - partB 0.0357 0.0420 25.9 0.848 0.4042
# add the column of Hemisphere
# Stack left- then right-hemisphere estimated marginal means and plot the
# condition means (beta values) for FFA2 (full 2x4 design).
nRow_E2 <-nrow(as.data.frame(emm_aov_E2_lFFA2))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA2 <- cbind(Hemisphere, rbind(as.data.frame(emm_aov_E2_lFFA2), as.data.frame(emm_aov_E2_rFFA2)))
plot_uni_E2_FFA2 <- plot_uni(desp_uni_E2_FFA2, contr_aov_E2_lFFA2, contr_aov_E2_rFFA2, "FFA2")
# ggsave('plot_uni_E2_FFA2.png', plot_uni_E2_FFA2, width = 10, height = 10)
plot_uni_E2_FFA2
The above figure shows the neural responses (beta values) in FFA2 for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# add the column of Hemisphere
# Plot the intact-vs-exchange (2x2) estimated marginal means for FFA2,
# combining both hemispheres.
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA2_ie))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA2_ie <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA2_ie), as.data.frame(emm_E2_rFFA2_ie)))
plot_uni_E2_FFA2_ie <- plot_uni(desp_uni_E2_FFA2_ie, simple_E2_lFFA2_ie, simple_E2_rFFA2_ie, "FFA2", F)
# ggsave('plot_uni_E2_FFA2_ie.png', plot_uni_E2_FFA2_ie, width = 10, height = 5)
plot_uni_E2_FFA2_ie
# add the column of Hemisphere
# Same plot for the top-vs-bottom (partA/partB) 2x2 design.
nRow_E2 <-nrow(as.data.frame(emm_E2_lFFA2_tb))
Hemisphere <- c(rep("left", nRow_E2), rep("right", nRow_E2))
desp_uni_E2_FFA2_tb <- cbind(Hemisphere, rbind(as.data.frame(emm_E2_lFFA2_tb), as.data.frame(emm_E2_rFFA2_tb)))
plot_uni_E2_FFA2_tb <- plot_uni(desp_uni_E2_FFA2_tb, simple_E2_lFFA2_tb, simple_E2_rFFA2_tb, "FFA2", F, T)
# ggsave('plot_uni_E2_FFA2_tb.png', plot_uni_E2_FFA2_tb, width = 10, height = 5)
plot_uni_E2_FFA2_tb
# one-sample for results of decode E2 FFA2
# One-tailed ("greater") one-sample t-tests of decoding accuracy against
# chance (0.5), per Hemisphere x ClassifyPair, in FFA2 (Experiment 2).
# The t-test is run once per group and its components extracted, instead
# of re-running the identical test for every summary column.
one_decode_agg_E2_FFA2 <- {
  df_decode_E2_FFA2 %>%
    mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
    group_by(Hemisphere, ClassifyPair) %>%
    summarize(.tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
              mean = .tt[[1]][["estimate"]],
              # NOTE(review): "stderr" component, i.e. the SEM, not the SD;
              # column name kept for backward compatibility.
              SD = .tt[[1]][["stderr"]],
              t = .tt[[1]][["statistic"]],
              df = .tt[[1]][["parameter"]],
              p = round(.tt[[1]][["p.value"]], 5),
              lower.CL = .tt[[1]][["conf.int"]][1],
              # the one-sided CI has an infinite upper bound; mirror the
              # lower bound around the mean for plotting instead
              upper.CL = mean * 2 - lower.CL,
              nullValue = .tt[[1]][["null.value"]],
              alternative = .tt[[1]][["alternative"]]
              ) %>%
    select(-.tt)
}
one_decode_agg_E2_FFA2
plot_decode_E2_FFA2 <- plot_decode(one_decode_agg_E2_FFA2, "FFA2")
# ggsave('plot_decode_E2_FFA2.png', plot_decode_E2_FFA2, width = 6.5, height = 16)
plot_decode_E2_FFA2
The above figure shows the decoding accuracy in FFA2 for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: “*p<0.1;**p<0.05;***p<0.01”
# Similarity of top + bottom to intact vs. exchange in FFA
# Two-tailed one-sample t-tests of the probability that combined
# top+bottom patterns are classified as "exchange", against 0.5, per
# Hemisphere x Combination in FFA2 (E2). The t-test is computed once per
# group and its components extracted (avoids 9 redundant identical calls).
one_simi_E2_FFA2 <- {
  df_simi_E2_FFA2 %>%
    group_by(Hemisphere, Combination) %>%
    summarize(.tt = list(t.test(RateAsExchange, mu = 0.5)),
              mean = .tt[[1]][["estimate"]],
              # NOTE(review): "stderr" component, i.e. the SEM, not the SD;
              # column name kept for backward compatibility.
              SD = .tt[[1]][["stderr"]],
              t = .tt[[1]][["statistic"]],
              df = .tt[[1]][["parameter"]],
              p = round(.tt[[1]][["p.value"]], 5),
              lower.CL = .tt[[1]][["conf.int"]][1],
              upper.CL = .tt[[1]][["conf.int"]][2],
              nullValue = .tt[[1]][["null.value"]],
              alternative = .tt[[1]][["alternative"]]
              ) %>%
    select(-.tt)
}
one_simi_E2_FFA2
plot_simi_E2_FFA2 <- plot_simi(one_simi_E2_FFA2, "FFA2")
# ggsave('plot_simi_E2_FFA2.png', plot_simi_E2_FFA2, width = 8, height = 10)
plot_simi_E2_FFA2
The above figure shows the probability of top+bottom being decoded as exchange conditions in FFA2. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
# Subset the E2 univariate, decoding, and similarity data to the VWFA
# label(s).
df_uni_E2_VWFA <- filter(df_clean_uni_E2, Label %in% label_VWFA)
df_decode_E2_VWFA <- filter(df_decode_acc_E2, Label %in% label_VWFA)
df_simi_E2_VWFA <- filter(df_rate_simi_E2, Label %in% label_VWFA)
# subjects used for each hemisphere
# unique(as.character((df_univar_agg_E2_VWFA %>% filter(Label == label_VWFA_E2))$SubjCode))
df_uni_E2_VWFA %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) within-subject ANOVA, VWFA.
# NOTE(review): here the filter compares against label_VWFA directly,
# while later blocks use label_VWFA[[1]] — confirm label_VWFA is length-1.
anova_E2_VWFA <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_VWFA, Label == label_VWFA))
anova_E2_VWFA
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 13 0.26 66.19 *** .35 <.0001
## 2 Layout 2.25, 29.19 0.03 10.51 *** .02 .0002
## 3 FaceWord:Layout 1.62, 21.06 0.06 9.23 ** .03 .002
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells (VWFA).
emm_aov_E2_VWFA <- emmeans(anova_E2_VWFA, ~ FaceWord * Layout)
emm_aov_E2_VWFA %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
contrast(emmeans(emm_aov_E2_VWFA, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese 0.786 0.0966 13 8.135 <.0001
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E2_VWFA, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.1739 0.0366 39 -4.748 0.0002
## intact - partA -0.0150 0.0366 39 -0.411 0.9763
## intact - partB -0.1217 0.0366 39 -3.323 0.0101
## exchange - partA 0.1588 0.0366 39 4.337 0.0006
## exchange - partB 0.0522 0.0366 39 1.425 0.4917
## partA - partB -0.1066 0.0366 39 -2.912 0.0289
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple-effect contrasts within each Layout and each
# FaceWord level (VWFA).
contr_aov_E2_VWFA <- contrast(emm_aov_E2_VWFA, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_VWFA
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.7486 0.1124 22.9 6.662 <.0001
## exchange . English - Chinese 0.9659 0.1124 22.9 8.596 <.0001
## partA . English - Chinese 0.5173 0.1124 22.9 4.604 0.0001
## partB . English - Chinese 0.9114 0.1124 22.9 8.111 <.0001
## . English intact - exchange -0.2825 0.0595 73.7 -4.749 <.0001
## . English intact - partA 0.1006 0.0595 73.7 1.692 0.0950
## . English intact - partB -0.2031 0.0595 73.7 -3.414 0.0010
## . English exchange - partA 0.3831 0.0595 73.7 6.441 <.0001
## . English exchange - partB 0.0794 0.0595 73.7 1.335 0.1859
## . English partA - partB -0.3037 0.0595 73.7 -5.106 <.0001
## . Chinese intact - exchange -0.0652 0.0595 73.7 -1.096 0.2766
## . Chinese intact - partA -0.1307 0.0595 73.7 -2.197 0.0312
## . Chinese intact - partB -0.0403 0.0595 73.7 -0.677 0.5006
## . Chinese exchange - partA -0.0655 0.0595 73.7 -1.101 0.2746
## . Chinese exchange - partB 0.0249 0.0595 73.7 0.419 0.6762
## . Chinese partA - partB 0.0904 0.0595 73.7 1.520 0.1328
2 (face vs. word) × 2 (intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) within-subject ANOVA,
# VWFA; "pes" requests partial eta squared.
anova_E2_VWFA_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_VWFA_ie, "pes")
# Uncorrected simple effects for the 2x2 (intact/exchange) design.
emm_E2_VWFA_ie <- emmeans(anova_E2_VWFA_ie, ~ FaceWord + Layout)
(simple_E2_VWFA_ie <- pairs(emm_E2_VWFA_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese 0.7486 0.1161 18.2 6.448 <.0001
## exchange . English - Chinese 0.9659 0.1161 18.2 8.319 <.0001
## . English intact - exchange -0.2825 0.0625 25.1 -4.521 0.0001
## . Chinese intact - exchange -0.0652 0.0625 25.1 -1.043 0.3067
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA/top vs. partB/bottom) within-subject
# ANOVA, VWFA.
anova_E2_VWFA_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_VWFA,
Label == label_VWFA[[1]],
Layout %in% c("partA", "partB")))
anova(anova_E2_VWFA_tb, "pes")
# Uncorrected simple effects for the 2x2 (top/bottom) design.
emm_E2_VWFA_tb <- emmeans(anova_E2_VWFA_tb, ~ FaceWord + Layout)
(simple_E2_VWFA_tb <- pairs(emm_E2_VWFA_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese 0.5173 0.1085 22.3 4.768 0.0001
## partB . English - Chinese 0.9114 0.1085 22.3 8.401 <.0001
## . English partA - partB -0.3037 0.0693 21.6 -4.383 0.0002
## . Chinese partA - partB 0.0904 0.0693 21.6 1.305 0.2057
# Plot the VWFA condition means (VWFA is left-hemisphere only, hence a
# single "left" Hemisphere column).
nRow_E2 <-nrow(as.data.frame(emm_aov_E2_VWFA))
Hemisphere <- c(rep("left", nRow_E2))
desp_uni_E2_VWFA <- cbind(Hemisphere, as.data.frame(emm_aov_E2_VWFA))
plot_uni_E2_VWFA <- plot_uni_vwfa(desp_uni_E2_VWFA, contr_aov_E2_VWFA, "VWFA")
# ggsave('plot_uni_E2_VWFA.png', plot_uni_E2_VWFA, width = 5.5, height = 10)
plot_uni_E2_VWFA
The above figure shows the neural responses (beta values) in VWFA for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# Plot the intact-vs-exchange (2x2) estimated marginal means for VWFA
# (left hemisphere only).
nRow_E2 <- nrow(as.data.frame(emm_E2_VWFA_ie))
Hemisphere <- c(rep("left", nRow_E2))
desp_uni_E2_VWFA_ie <- cbind(Hemisphere, as.data.frame(emm_E2_VWFA_ie))
# BUG FIX: this call previously passed simple_E2_VWFA_tb (the top/bottom
# contrasts); the intact-vs-exchange figure must annotate with the
# intact/exchange simple effects, simple_E2_VWFA_ie.
plot_uni_E2_VWFA_ie <- plot_uni_vwfa(desp_uni_E2_VWFA_ie, simple_E2_VWFA_ie, "VWFA", FALSE)
# ggsave('plot_uni_E2_VWFA_ie.png', plot_uni_E2_VWFA_ie, width = 5.5, height = 10)
plot_uni_E2_VWFA_ie
# Plot the top-vs-bottom (partA/partB) estimated marginal means for VWFA
# (left hemisphere only).
nRow_E2 <-nrow(as.data.frame(emm_E2_VWFA_tb))
Hemisphere <- c(rep("left", nRow_E2))
desp_uni_E2_VWFA_tb <- cbind(Hemisphere, as.data.frame(emm_E2_VWFA_tb))
plot_uni_E2_VWFA_tb <- plot_uni_vwfa(desp_uni_E2_VWFA_tb, simple_E2_VWFA_tb, "VWFA", FALSE, T)
# ggsave('plot_uni_E2_VWFA_tb.png', plot_uni_E2_VWFA_tb, width = 5.5, height = 10)
plot_uni_E2_VWFA_tb
# one-sample for results of decode E2 VWFA
# One-tailed ("greater") one-sample t-tests of decoding accuracy against
# chance (0.5), per Hemisphere x ClassifyPair, in VWFA (Experiment 2).
# The t-test is run once per group and its components extracted, instead
# of re-running the identical test for every summary column.
one_decode_agg_E2_VWFA <- {
  df_decode_E2_VWFA %>%
    mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
    group_by(Hemisphere, ClassifyPair) %>%
    summarize(.tt = list(t.test(Accuracy, mu = 0.5, alternative = "greater")),
              mean = .tt[[1]][["estimate"]],
              # NOTE(review): "stderr" component, i.e. the SEM, not the SD;
              # column name kept for backward compatibility.
              SD = .tt[[1]][["stderr"]],
              t = .tt[[1]][["statistic"]],
              df = .tt[[1]][["parameter"]],
              p = round(.tt[[1]][["p.value"]], 5),
              lower.CL = .tt[[1]][["conf.int"]][1],
              # the one-sided CI has an infinite upper bound; mirror the
              # lower bound around the mean for plotting instead
              upper.CL = mean * 2 - lower.CL,
              nullValue = .tt[[1]][["null.value"]],
              alternative = .tt[[1]][["alternative"]]
              ) %>%
    select(-.tt)
}
one_decode_agg_E2_VWFA
plot_decode_E2_VWFA <- plot_decode_vwfa(one_decode_agg_E2_VWFA, "VWFA")
# ggsave('plot_decode_E2_VWFA.png', plot_decode_E2_VWFA, width = 4, height = 16)
plot_decode_E2_VWFA
The above figure shows the decoding accuracy in VWFA for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: ***, p <.001
# Similarity of top + bottom to intact vs. exchange in VWFA
# Two-tailed one-sample t-tests of the probability that combined
# top+bottom patterns are classified as "exchange", against 0.5, per
# Hemisphere x Combination in VWFA (E2). The t-test is computed once per
# group and its components extracted (avoids 9 redundant identical calls).
one_simi_E2_VWFA <- {
  df_simi_E2_VWFA %>%
    group_by(Hemisphere, Combination) %>%
    summarize(.tt = list(t.test(RateAsExchange, mu = 0.5)),
              mean = .tt[[1]][["estimate"]],
              # NOTE(review): "stderr" component, i.e. the SEM, not the SD;
              # column name kept for backward compatibility.
              SD = .tt[[1]][["stderr"]],
              t = .tt[[1]][["statistic"]],
              df = .tt[[1]][["parameter"]],
              p = round(.tt[[1]][["p.value"]], 5),
              lower.CL = .tt[[1]][["conf.int"]][1],
              upper.CL = .tt[[1]][["conf.int"]][2],
              nullValue = .tt[[1]][["null.value"]],
              alternative = .tt[[1]][["alternative"]]
              ) %>%
    select(-.tt)
}
one_simi_E2_VWFA
plot_simi_E2_VWFA <- plot_simi_vwfa(one_simi_E2_VWFA, "VWFA")
# ggsave('plot_simi_E2_VWFA.png', plot_simi_E2_VWFA, width = 4.25, height = 10)
plot_simi_E2_VWFA
The above figure shows the probability of top+bottom being decoded as exchange conditions in VWFA. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
# only keep data for these two labels
# Subset the E2 univariate, decoding, and similarity data to the LO
# labels (left and right hemisphere).
df_uni_E2_LO <- filter(df_clean_uni_E2, Label %in% label_LO)
df_decode_E2_LO <- filter(df_decode_acc_E2, Label %in% label_LO)
df_simi_E2_LO <- filter(df_rate_simi_E2, Label %in% label_LO)
# Count the number of sessions per hemisphere/label.
df_uni_E2_LO %>%
select(Hemisphere, Label, SessCode) %>%
distinct() %>%
group_by(Hemisphere, Label) %>%
summarize(Count = n())
# 2 (FaceWord) x 4 (Layout) within-subject ANOVA, left LO
# (label_LO[[1]]).
anova_E2_lLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO, Label == label_LO[[1]]))
anova_E2_lLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 15 0.19 4.52 + .01 .05
## 2 Layout 1.75, 26.31 0.05 1.33 .002 .28
## 3 FaceWord:Layout 2.06, 30.90 0.07 1.75 .004 .19
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells (left LO).
emm_aov_E2_lLO <- emmeans(anova_E2_lLO, ~ FaceWord * Layout)
emm_aov_E2_lLO %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
contrast(emmeans(emm_aov_E2_lLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese -0.163 0.0768 15 -2.126 0.0506
##
## Results are averaged over the levels of: Layout
contrast(emmeans(emm_aov_E2_lLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange -0.05319 0.0414 45 -1.284 0.5779
## intact - partA -0.08137 0.0414 45 -1.964 0.2170
## intact - partB -0.04894 0.0414 45 -1.181 0.6419
## exchange - partA -0.02818 0.0414 45 -0.680 0.9042
## exchange - partB 0.00426 0.0414 45 0.103 0.9996
## partA - partB 0.03244 0.0414 45 0.783 0.8619
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple-effect contrasts within each Layout and each
# FaceWord level (left LO).
contr_aov_E2_lLO <- contrast(emm_aov_E2_lLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_lLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.1079 0.1006 37.8 -1.073 0.2903
## exchange . English - Chinese -0.0773 0.1006 37.8 -0.768 0.4470
## partA . English - Chinese -0.3014 0.1006 37.8 -2.996 0.0048
## partB . English - Chinese -0.1661 0.1006 37.8 -1.651 0.1070
## . English intact - exchange -0.0685 0.0673 85.0 -1.017 0.3120
## . English intact - partA 0.0154 0.0673 85.0 0.228 0.8201
## . English intact - partB -0.0198 0.0673 85.0 -0.295 0.7691
## . English exchange - partA 0.0838 0.0673 85.0 1.245 0.2164
## . English exchange - partB 0.0487 0.0673 85.0 0.723 0.4719
## . English partA - partB -0.0352 0.0673 85.0 -0.523 0.6025
## . Chinese intact - exchange -0.0379 0.0673 85.0 -0.563 0.5750
## . Chinese intact - partA -0.1781 0.0673 85.0 -2.645 0.0097
## . Chinese intact - partB -0.0780 0.0673 85.0 -1.159 0.2497
## . Chinese exchange - partA -0.1402 0.0673 85.0 -2.082 0.0403
## . Chinese exchange - partB -0.0401 0.0673 85.0 -0.596 0.5527
## . Chinese partA - partB 0.1001 0.0673 85.0 1.486 0.1409
2 (face vs. word) × 2 (intact vs. exchange) ANOVA
# 2 (FaceWord) x 2 (Layout: intact vs. exchange) within-subject ANOVA,
# left LO; "pes" requests partial eta squared.
anova_E2_lLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[1]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_lLO_ie, "pes")
# Uncorrected simple effects for the 2x2 (intact/exchange) design.
emm_E2_lLO_ie <- emmeans(anova_E2_lLO_ie, ~ FaceWord + Layout)
(simple_E2_lLO_ie <- pairs(emm_E2_lLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.1079 0.0962 21.3 -1.122 0.2745
## exchange . English - Chinese -0.0773 0.0962 21.3 -0.804 0.4305
## . English intact - exchange -0.0685 0.0727 26.4 -0.942 0.3547
## . Chinese intact - exchange -0.0379 0.0727 26.4 -0.521 0.6065
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (Layout: partA/top vs. partB/bottom) within-subject
# ANOVA, left LO.
anova_E2_lLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[1]],
Layout %in% c("partA", "partB")))
anova(anova_E2_lLO_tb, "pes")
# Uncorrected simple effects for the 2x2 (top/bottom) design.
emm_E2_lLO_tb <- emmeans(anova_E2_lLO_tb, ~ FaceWord + Layout)
(simple_E2_lLO_tb <- pairs(emm_E2_lLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.3014 0.105 29.6 -2.875 0.0074
## partB . English - Chinese -0.1661 0.105 29.6 -1.585 0.1236
## . English partA - partB -0.0352 0.075 19.5 -0.470 0.6439
## . Chinese partA - partB 0.1001 0.075 19.5 1.335 0.1973
# 2 (FaceWord) x 4 (Layout) within-subject ANOVA, right LO
# (label_LO[[2]]).
aov_E2_rLO <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO, Label == label_LO[[2]]))
aov_E2_rLO
## Anova Table (Type 3 tests)
##
## Response: Response
## Effect df MSE F ges p.value
## 1 FaceWord 1, 16 0.15 44.67 *** .03 <.0001
## 2 Layout 2.30, 36.79 0.07 3.03 + .002 .05
## 3 FaceWord:Layout 2.18, 34.93 0.04 1.05 .0005 .36
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '+' 0.1 ' ' 1
##
## Sphericity correction method: GG
# Estimated marginal means for all FaceWord x Layout cells (right LO).
emm_aov_E2_rLO <- emmeans(aov_E2_rLO, ~ FaceWord * Layout)
emm_aov_E2_rLO %>%
as.data.frame() %>%
arrange(FaceWord)
Posthoc analysis for the main effects:
# Post-hoc pairwise contrast for the FaceWord main effect (collapsed over Layout).
contrast(emmeans(emm_aov_E2_rLO, ~ FaceWord), "pairwise")
## contrast estimate SE df t.ratio p.value
## English - Chinese -0.438 0.0655 16 -6.684 <.0001
##
## Results are averaged over the levels of: Layout
# Post-hoc pairwise contrasts for the Layout main effect (Tukey-adjusted by default).
contrast(emmeans(emm_aov_E2_rLO, ~ Layout), "pairwise") # , adjust = "none"
## contrast estimate SE df t.ratio p.value
## intact - exchange 0.08614 0.0568 48 1.516 0.4360
## intact - partA -0.08487 0.0568 48 -1.494 0.4491
## intact - partB -0.00697 0.0568 48 -0.123 0.9993
## exchange - partA -0.17101 0.0568 48 -3.010 0.0209
## exchange - partB -0.09311 0.0568 48 -1.639 0.3671
## partA - partB 0.07790 0.0568 48 1.371 0.5233
##
## Results are averaged over the levels of: FaceWord
## P value adjustment: tukey method for comparing a family of 4 estimates
Results of simple effect analysis (uncorrected):
# Uncorrected simple effects: pairwise FaceWord contrasts within each Layout,
# and pairwise Layout contrasts within each FaceWord, in the right LO.
contr_aov_E2_rLO <- contrast(emm_aov_E2_rLO, "pairwise", simple = "each", combine = TRUE, adjust = "none")
# contrast(emm_uni_aov_E2, interaction = "pairwise") # , adjust = "none"
contr_aov_E2_rLO
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.3835 0.0837 37.6 -4.584 <.0001
## exchange . English - Chinese -0.5192 0.0837 37.6 -6.206 <.0001
## partA . English - Chinese -0.3966 0.0837 37.6 -4.741 <.0001
## partB . English - Chinese -0.4507 0.0837 37.6 -5.388 <.0001
## . English intact - exchange 0.1540 0.0710 88.9 2.170 0.0327
## . English intact - partA -0.0783 0.0710 88.9 -1.103 0.2729
## . English intact - partB 0.0267 0.0710 88.9 0.376 0.7081
## . English exchange - partA -0.2323 0.0710 88.9 -3.273 0.0015
## . English exchange - partB -0.1273 0.0710 88.9 -1.794 0.0762
## . English partA - partB 0.1050 0.0710 88.9 1.479 0.1427
## . Chinese intact - exchange 0.0183 0.0710 88.9 0.257 0.7975
## . Chinese intact - partA -0.0914 0.0710 88.9 -1.288 0.2010
## . Chinese intact - partB -0.0406 0.0710 88.9 -0.572 0.5687
## . Chinese exchange - partA -0.1097 0.0710 88.9 -1.546 0.1257
## . Chinese exchange - partB -0.0589 0.0710 88.9 -0.830 0.4090
## . Chinese partA - partB 0.0508 0.0710 88.9 0.716 0.4757
# 2 (FaceWord) x 2 (intact vs. exchange) ANOVA in the right LO: refit on the
# intact/exchange conditions only, then test simple effects (uncorrected).
anova_E2_rLO_ie <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[2]],
Layout %in% c("intact", "exchange")))
anova(anova_E2_rLO_ie, "pes")
emm_E2_rLO_ie <- emmeans(anova_E2_rLO_ie, ~ FaceWord + Layout)
(simple_E2_rLO_ie <- pairs(emm_E2_rLO_ie, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## intact . English - Chinese -0.3835 0.0832 25.8 -4.607 0.0001
## exchange . English - Chinese -0.5192 0.0832 25.8 -6.238 <.0001
## . English intact - exchange 0.1540 0.0712 29.3 2.164 0.0388
## . Chinese intact - exchange 0.0183 0.0712 29.3 0.257 0.7993
2 (face vs. word) × 2 (top vs. bottom) ANOVA
# 2 (FaceWord) x 2 (partA vs. partB) ANOVA in the right LO: refit the model
# on the two part conditions only, then test simple effects (uncorrected).
anova_E2_rLO_tb <- aov_4(Response ~ FaceWord * Layout + (FaceWord * Layout | SubjCode),
data = filter(df_uni_E2_LO,
Label == label_LO[[2]],
Layout %in% c("partA", "partB")))
anova(anova_E2_rLO_tb, "pes")
emm_E2_rLO_tb <- emmeans(anova_E2_rLO_tb, ~ FaceWord + Layout)
(simple_E2_rLO_tb <- pairs(emm_E2_rLO_tb, simple = "each", combine = TRUE, adjust = "none"))
## Layout FaceWord contrast estimate SE df t.ratio p.value
## partA . English - Chinese -0.3966 0.0841 29.6 -4.717 0.0001
## partB . English - Chinese -0.4507 0.0841 29.6 -5.361 <.0001
## . English partA - partB 0.1050 0.0629 29.6 1.669 0.1056
## . Chinese partA - partB 0.0508 0.0629 29.6 0.808 0.4254
# Tag each hemisphere's estimated marginal means and stack them for plotting.
emm_lh_df <- as.data.frame(emm_aov_E2_lLO)
emm_rh_df <- as.data.frame(emm_aov_E2_rLO)
nRow_E2 <- nrow(emm_lh_df)
Hemisphere <- rep(c("left", "right"), each = nRow_E2)
desp_uni_E2_LO <- cbind(Hemisphere, rbind(emm_lh_df, emm_rh_df))
plot_uni_E2_LO <- plot_uni(desp_uni_E2_LO, contr_aov_E2_lLO, contr_aov_E2_rLO, "LO")
# ggsave('plot_uni_E2_LO.png', plot_uni_E2_LO, width = 10, height = 10)
plot_uni_E2_LO
The above figure shows the neural responses (beta values) in LO for each condition. The numbers are the p-values for the tests of differences between intact vs. exchange in that condition. Error bars represent 95% confidence intervals. Note: *, p < .05
# Stack left/right EMMs of the intact-vs-exchange subset for plotting.
emm_ie_lh <- as.data.frame(emm_E2_lLO_ie)
emm_ie_rh <- as.data.frame(emm_E2_rLO_ie)
nRow_E2 <- nrow(emm_ie_lh)
Hemisphere <- rep(c("left", "right"), each = nRow_E2)
desp_uni_E2_LO_ie <- cbind(Hemisphere, rbind(emm_ie_lh, emm_ie_rh))
plot_uni_E2_LO_ie <- plot_uni(desp_uni_E2_LO_ie, simple_E2_lLO_ie, simple_E2_rLO_ie, "LO", FALSE)
# ggsave('plot_uni_E2_LO_ie.png', plot_uni_E2_LO_ie, width = 10, height = 5)
plot_uni_E2_LO_ie
# Stack left/right EMMs of the partA-vs-partB subset for plotting.
emm_tb_lh <- as.data.frame(emm_E2_lLO_tb)
emm_tb_rh <- as.data.frame(emm_E2_rLO_tb)
nRow_E2 <- nrow(emm_tb_lh)
Hemisphere <- rep(c("left", "right"), each = nRow_E2)
desp_uni_E2_LO_tb <- cbind(Hemisphere, rbind(emm_tb_lh, emm_tb_rh))
plot_uni_E2_LO_tb <- plot_uni(desp_uni_E2_LO_tb, simple_E2_lLO_tb, simple_E2_rLO_tb, "LO", FALSE, TRUE)
# ggsave('plot_uni_E2_LO_tb.png', plot_uni_E2_LO_tb, width = 10, height = 5)
plot_uni_E2_LO_tb
# One-sample t-tests for the E2 LO decoding accuracies: for every
# hemisphere x classification pair, test whether the mean accuracy exceeds
# the chance level (0.5; one-tailed, "greater").
# The t-test is run ONCE per group and its named components are extracted,
# instead of re-running the identical test for every summary column.
one_decode_agg_E2_LO <- df_decode_E2_LO %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
  group_by(Hemisphere, ClassifyPair) %>%
  summarize({
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    tibble(
      mean = tt$estimate,
      # NOTE(review): t.test()$stderr is the standard error of the mean,
      # not a standard deviation -- column name kept for downstream code.
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      # the one-tailed CI's upper bound is Inf; mirror the lower bound
      # around the mean to approximate the two-sided upper bound
      upper.CL = mean * 2 - lower.CL,
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E2_LO
plot_decode_E2_LO <- plot_decode(one_decode_agg_E2_LO, "LO")
# ggsave('plot_decode_E2_LO.png', plot_decode_E2_LO, width = 6.5, height = 16)
plot_decode_E2_LO
The above figure shows the decoding accuracy in LO for each pair. The numbers are the p-values for the one-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals. Note: **, p < .01; ***, p < .001
# Similarity of top + bottom to intact vs. exchange in LO.
# For each hemisphere x weight combination, a TWO-tailed one-sample t-test
# asks whether the probability of the top+bottom blend being classified as
# "exchange" differs from 0.5.
# The t-test is run once per group and its named components are extracted,
# rather than repeating the identical call for every summary column.
one_simi_E2_LO <- df_simi_E2_LO %>%
  group_by(Hemisphere, Combination) %>%
  summarize({
    tt <- t.test(RateAsExchange, mu = 0.5)  # two-tailed (default)
    tibble(
      mean = tt$estimate,
      # NOTE(review): t.test()$stderr is the standard error of the mean,
      # not a standard deviation -- column name kept for downstream code.
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      upper.CL = tt$conf.int[2],
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_simi_E2_LO
plot_simi_E2_LO <- plot_simi(one_simi_E2_LO, "LO")
# ggsave('plot_simi_E2_LO.png', plot_simi_E2_LO, width = 8, height = 10)
plot_simi_E2_LO
The above figure shows the probability of top+bottom being decoded as exchange conditions in LO. Patterns of top and bottom were combined with different weights, i.e., “face_top0.25-face_bottom0.75” denotes the linear combinations of face_top and face_bottom with the weights of 0.25/0.75. The numbers are the p-values for the two-tail one-sample t-tests against the chance level (0.5) in that condition. Error bars represent 95% confidence intervals.
Labels for LO were defined with the maximum area of 100, 150, 200 and 300 mm^2, respectively.
# Read the E1 LO label info; derive a short ROI name from the label string
# and a Subject id from the session code (text before the first underscore).
df_label_LO_E1 <- file.path("data", "faceword_E1_Label_LO_HJ.csv") %>%
  read_csv() %>%
  mutate(roi = str_remove(Label, "roi."),
         roi = str_remove(roi, ".label"),
         Subject = str_replace(SubjCode, "\\_.*", ""))
# One row per participant, one column per ROI, holding the label Size.
df_label_LO_E1 %>%
  select(SubjCode, roi, Size) %>%
  pivot_wider(names_from = roi, values_from = Size) %>%
  arrange(SubjCode)
The above table displays the size (in mm2) of each label for each participant. (NA denotes that this label is not available for that participant.)
# Same wide layout as above, but filled with vertex counts (NVtxs) per ROI.
df_label_LO_E1 %>%
  select(SubjCode, roi, NVtxs) %>%
  pivot_wider(names_from = "roi", values_from = "NVtxs") %>%
  arrange(SubjCode)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# Per-label participant counts and mean size / vertex number, computed once
# on the full table and once after applying the minimum-size cutoff.
summarise_labels_E1 <- function(df) {
  # One row per Label/roi: how many participants have it, plus averages.
  df %>%
    group_by(Label, roi) %>%
    summarize(Count = n(),
              meanSize = mean(Size),
              meanNVtx = mean(NVtxs))
}
summarise_labels_E1(df_label_LO_E1)
df_nlabel_LO_E1 <- summarise_labels_E1(filter(df_label_LO_E1, Size > nVtx_size_min))
df_nlabel_LO_E1
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# Load E1 LO decoding results: derive the hemisphere from the label name,
# attach the label size info, and drop labels below the size cutoff.
df_LO_area_E1 <- read_csv(file.path("data", "faceword_E1_Decode_LO_noz.csv")) %>%
  select(Label, SessCode, ClassifyPair, ACC) %>%
  mutate(Hemisphere = case_when(grepl("lh", Label) ~ "left",
                                grepl("rh", Label) ~ "right",
                                TRUE ~ "NA"),
         Subject = str_remove(SessCode, "\\_.*")) %>%
  left_join(df_label_LO_E1, by = c("Label", "Subject")) %>%
  filter(Size > nVtx_size_min)
# Average the accuracy over runs within hemisphere/label/session/pair.
df_decode_LO_acc_E1 <- df_LO_area_E1 %>%
  group_by(Hemisphere, Label, SessCode, ClassifyPair) %>%
  summarize(Accuracy = mean(ACC), Count = n()) %>%
  ungroup()
df_decode_LO_acc_E1
# One-sample t-tests for the E1 LO decoding accuracies, per
# hemisphere x classification pair x label (one label per area size):
# is the mean accuracy above chance (0.5; one-tailed, "greater")?
# Run t.test() once per group and pull out its named components, instead of
# repeating the identical call for every summary column.
one_decode_agg_E1_LO_area <- df_decode_LO_acc_E1 %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E1)) %>%
  group_by(Hemisphere, ClassifyPair, Label) %>%
  summarize({
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    tibble(
      mean = tt$estimate,
      # NOTE(review): t.test()$stderr is the standard error of the mean,
      # not a standard deviation -- column name kept for downstream code.
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      # the one-tailed CI's upper bound is Inf; mirror the lower bound
      # around the mean to approximate the two-sided upper bound
      upper.CL = mean * 2 - lower.CL,
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E1_LO_area
# Read the E2 LO label info; derive a short ROI name from the label string
# and a Subject id from the session code (text before the first underscore).
df_label_LO_E2 <- file.path("data", "faceword_E2_Label_LO_HJ.csv") %>%
  read_csv() %>%
  mutate(roi = str_remove(Label, "roi."),
         roi = str_remove(roi, ".label"),
         Subject = str_replace(SubjCode, "\\_.*", ""))
# One row per participant, one column per ROI, holding the label Size.
df_label_LO_E2 %>%
  select(SubjCode, roi, Size) %>%
  pivot_wider(names_from = roi, values_from = Size) %>%
  arrange(SubjCode)
The above table displays the size (in mm2) of each label for each participant. (NA denotes that this label is not available for that participant.)
# Same wide layout as above, but filled with vertex counts (NVtxs) per ROI.
df_label_LO_E2 %>%
  select(SubjCode, roi, NVtxs) %>%
  pivot_wider(names_from = "roi", values_from = "NVtxs") %>%
  arrange(SubjCode)
The above table displays the number of vertices for each label and each participant. (NA denotes that this label is not available for that participant.)
# Per-label participant counts and mean size / vertex number, computed once
# on the full table and once after applying the minimum-size cutoff.
summarise_labels_E2 <- function(df) {
  # One row per Label/roi: how many participants have it, plus averages.
  df %>%
    group_by(Label, roi) %>%
    summarize(Count = n(),
              meanSize = mean(Size),
              meanNVtx = mean(NVtxs))
}
summarise_labels_E2(df_label_LO_E2)
df_nlabel_LO_E2 <- summarise_labels_E2(filter(df_label_LO_E2, Size > nVtx_size_min))
df_nlabel_LO_E2
The above table displays the number of participants included in the following analyses for each ROI. (VWFA is only found on the left hemisphere.)
# load decoding results in LO (E2): derive the hemisphere from the label
# name, rename E2's top/bottom pairs to partA/partB so they line up with
# E1's naming, attach label size info, and drop labels below the cutoff.
df_LO_area_E2 <- read_csv(file.path("data", "faceword_E2_Decode_LO_noz.csv")) %>%
select(Label, SessCode, ClassifyPair, ACC) %>%
mutate(Hemisphere = if_else(grepl("lh", Label), "left",
if_else(grepl("rh", Label), "right", "NA")),
Subject = str_remove(SessCode, "\\_.*"),
# unify E1/E2 pair labels ("top"/"bottom" -> "partA"/"partB")
ClassifyPair = fct_recode(ClassifyPair,
`Chinese_partA-Chinese_partB` = "Chinese_top-Chinese_bottom",
`English_partA-English_partB` = "English_top-English_bottom"),
ClassifyPair = factor(ClassifyPair, levels = pair_order_E2)) %>%
left_join(df_label_LO_E2, by = c("Label", "Subject")) %>%
filter(Size > nVtx_size_min)
# Average the accuracy over runs within hemisphere/label/session/pair.
df_decode_LO_acc_E2 <- df_LO_area_E2 %>%
group_by(Hemisphere, Label, SessCode, ClassifyPair) %>% # divide the data into groups by these columns
summarize(Accuracy = mean(ACC), Count = n()) %>%
ungroup()
df_decode_LO_acc_E2
# One-sample t-tests for the E2 LO decoding accuracies, per
# hemisphere x classification pair x label (one label per area size):
# is the mean accuracy above chance (0.5; one-tailed, "greater")?
# Run t.test() once per group and pull out its named components, instead of
# repeating the identical call for every summary column.
one_decode_agg_E2_LO_area <- df_decode_LO_acc_E2 %>%
  mutate(ClassifyPair = fct_relevel(ClassifyPair, pair_order_E2)) %>%
  group_by(Hemisphere, ClassifyPair, Label) %>%
  summarize({
    tt <- t.test(Accuracy, mu = 0.5, alternative = "greater")
    tibble(
      mean = tt$estimate,
      # NOTE(review): t.test()$stderr is the standard error of the mean,
      # not a standard deviation -- column name kept for downstream code.
      SD = tt$stderr,
      t = tt$statistic,
      df = tt$parameter,
      p = round(tt$p.value, 5),
      lower.CL = tt$conf.int[1],
      # the one-tailed CI's upper bound is Inf; mirror the lower bound
      # around the mean to approximate the two-sided upper bound
      upper.CL = mean * 2 - lower.CL,
      nullValue = tt$null.value,
      alternative = tt$alternative
    )
  })
one_decode_agg_E2_LO_area
# Combine E1/E2 decoding summaries, split the ClassifyPair string into its
# stimulus/layout parts, drop the object-localizer labels, and rebuild
# display-friendly Stimuli / Layout facet labels plus the label Area.
df_decode_LO_area <- rbind(mutate(one_decode_agg_E1_LO_area, Exp = "E1"),
mutate(one_decode_agg_E2_LO_area, Exp = "E2")) %>%
separate(ClassifyPair, c("Stimuli1", "Layout1", "Stimuli2", "Layout2")) %>%
filter(!(Label %in% c("roi.lh.o-vs-scr.label", "roi.rh.o-vs-scr.label"))) %>%
mutate(Stimuli1 = if_else(Stimuli1 %in% c("face", "word"), paste0(Stimuli1, "s"), Stimuli1),
Stimuli2 = if_else(Stimuli2 %in% c("face", "word"), paste0(Stimuli2, "s"), Stimuli2),
Stimuli = ifelse(Stimuli1 == Stimuli2, Stimuli1,
paste(Stimuli1, Stimuli2, sep = "\nvs.\n")),
Layout = ifelse(Layout1 == Layout2, Layout1,
paste(toTitleCase(Layout1), toTitleCase(Layout2), sep = "\nvs.\n")),
# NOTE(review): assumes the area code (e.g. "100") always sits at
# characters 18-20 of the label name -- confirm for all label names.
Area = substr(Label, 18, 20)) %>%
select(-c(Stimuli1, Stimuli2, Layout1, Layout2))
# save the df for intact English vs. intact Chinese
df_intact_LO_area <- filter(df_decode_LO_area, Layout == "intact") %>%
mutate(Stimuli = fct_rev(Stimuli))
# Annotation text for the intact-decoding figure (left-hemisphere panels only).
dat_text_intact_LO_area <- data.frame(
  Stimuli = c("faces\nvs.\nwords", "English\nvs.\nChinese"),
  Hemisphere = "left",
  label = c("Chinese speakers: \nChinese faces vs. characters",
            "English speakers: \nEnglish words vs. Chinese characters"),
  x = .5,
  y = 1.05
)
# intact
# Bar plot: decoding accuracy of the intact pairs against label area,
# faceted by stimulus pair (rows) and hemisphere (columns); significance
# stars come from sig_ast(p), the dashed line marks chance (0.5).
plot_intact_LO_area <- ggplot(df_intact_LO_area, aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_intact_LO_area, aes(x = x, y = y, label = label), size = 4, fontface = "bold", hjust=0) + #
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_LO_area_intact.pdf', plot_intact_LO_area, width = 8, height = 8)
plot_intact_LO_area
# Keep only within-stimulus pairs for the intact-vs-exchange comparison and
# order the stimulus levels so the E1 panels precede the E2 panels.
df_inex_LO_area <- df_decode_LO_area %>%
  filter(str_detect(Stimuli, "vs.", negate = TRUE),
         Layout == "Intact\nvs.\nExchange") %>%
  mutate(Stimuli = fct_relevel(Stimuli, "English", after = Inf),
         Stimuli = fct_relevel(Stimuli, "Chinese", after = Inf))
# Annotation text for the figure (left-hemisphere panels only).
dat_text_inex_LO_area <- data.frame(
  Stimuli = levels(df_inex_LO_area$Stimuli),
  Hemisphere = "left",
  label = c("Chinese speakers: \nChinese faces",
            "Chinese speakers: \nChinese characters",
            "English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = .5,
  y = 1.05
)
# Bar plot: intact-vs-exchange decoding accuracy against label area, faceted
# by stimulus (rows) and hemisphere (columns); dashed line marks chance (0.5).
plot_inex_LO_area <- ggplot(df_inex_LO_area, aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(title = "Intact vs. Exchange", x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_LO_area, mapping = aes(x = x, y = y, label = label), size = 6, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_LO_area_inex.pdf', plot_inex_LO_area, width = 8, height = 16)
plot_inex_LO_area
# Annotation text for the E1 intact-vs-exchange panels (first two stimulus levels).
dat_text_inex_LO_area_lr_fw <- data.frame(
  Stimuli = levels(df_inex_LO_area$Stimuli)[1:2],
  Hemisphere = "left",
  label = c("Chinese speakers: \nChinese faces",
            "Chinese speakers: \nChinese characters"),
  x = .5,
  y = 1.05
)
# Experiment-1-only version of the intact-vs-exchange accuracy plot
# (combined with the E2 panel via ggarrange further below).
plot_inex_LO_area_E1 <- ggplot(filter(df_inex_LO_area, Exp == "E1"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises Experiment 1
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_LO_area_lr_fw, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Annotation text for the E2 intact-vs-exchange panels (last two stimulus levels).
dat_text_inex_LO_area_lr_ec <- data.frame(
  Stimuli = levels(df_inex_LO_area$Stimuli)[3:4],
  Hemisphere = "left",
  label = c("English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = .5,
  y = 1.05
)
# Experiment-2-only version of the intact-vs-exchange accuracy plot;
# arranged side-by-side with the E1 panel at the end of this chunk.
plot_inex_LO_area_E2 <- ggplot(filter(df_inex_LO_area, Exp == "E2"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises Experiment 2
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_LO_area_lr_ec, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Put the E1 and E2 intact-vs-exchange panels side by side.
plot_inex_LO_area_lr <- ggarrange(plot_inex_LO_area_E1, plot_inex_LO_area_E2, ncol = 2,
# labels = c("", "Intact vs. Exchange"),
# label.x = -0.36,
# label.y = 1,
font.label = list(size = 24))
# ggsave('plot_decode_LO_area_inex_lr.pdf', plot_inex_LO_area_lr, width = 15, height = 8)
plot_inex_LO_area_lr
# Part-decoding subset (top/bottom or left/right pairs), with stimulus
# levels ordered so the E1 panels precede the E2 panels.
df_parts_LO_area <- df_decode_LO_area %>%
  filter(str_detect(Stimuli, "vs.", negate = TRUE),
         Layout != "Intact\nvs.\nExchange") %>%
  mutate(Stimuli = as_factor(Stimuli),
         Stimuli = fct_relevel(Stimuli, "English", after = Inf),
         Stimuli = fct_relevel(Stimuli, "Chinese", after = Inf))
# Annotation text for the figure (left-hemisphere panels only).
dat_parts_inex_LO_area <- data.frame(
  Stimuli = levels(df_parts_LO_area$Stimuli),
  Hemisphere = "left",
  label = c("Chinese speakers: \nChinese faces",
            "Chinese speakers: \nChinese characters",
            "English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = .5,
  y = 1.05
)
# Bar plot: part-decoding (top vs. bottom / left vs. right) accuracy against
# label area, faceted by stimulus (rows) and hemisphere (columns).
plot_topbottom_LO_area <- ggplot(df_parts_LO_area, aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(title = "Top vs. bottom; left vs. right", x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_parts_inex_LO_area, mapping = aes(x = x, y = y, label = label), size = 6, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_LO_area_topbottom.pdf', plot_topbottom_LO_area, width = 8, height = 16)
plot_topbottom_LO_area
# Annotation text for the E1 part-decoding panels (first two stimulus levels).
dat_text_tb_all_lr_E1 <- data.frame(
  Stimuli = levels(df_parts_LO_area$Stimuli)[1:2],
  Hemisphere = "left",
  label = c("Chinese speakers: \nChinese faces",
            "Chinese speakers: \nChinese characters"),
  x = .5,
  y = 1.05
)
# Experiment-1-only version of the part-decoding accuracy plot
# (combined with the E2 panel via ggarrange further below).
plot_topbottom_LO_area_E1 <- ggplot(filter(df_parts_LO_area, Exp == "E1"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises Experiment 1
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Annotation text for the E2 part-decoding panels (last two stimulus levels).
dat_text_tb_all_lr_E2 <- data.frame(
  Stimuli = levels(df_parts_LO_area$Stimuli)[3:4],
  Hemisphere = "left",
  label = c("English speakers: \nEnglish words",
            "English speakers: \nChinese characters"),
  x = .5,
  y = 1.05
)
# Experiment-2-only version of the part-decoding accuracy plot;
# arranged side-by-side with the E1 panel at the end of this chunk.
plot_topbottom_LO_area_E2 <- ggplot(filter(df_parts_LO_area, Exp == "E2"), aes(y = mean, x = Area)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # set the error bar
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # add the line for 0.5 and 1 (y)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = expression(paste("Lateral Occipital Label Area (", mm^2, ")")), y = "Accuracy") + # set the names for main, x and y axises Experiment 2
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add starts to the significant columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) +
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Put the E1 and E2 part-decoding panels side by side.
plot_topbottom_LO_area_lr <- ggarrange(plot_topbottom_LO_area_E1, plot_topbottom_LO_area_E2, ncol = 2,
# labels = c("", "Top vs. bottom; left vs. right"),
# label.x = -0.6,
# label.y = 1,
font.label = list(size = 24))
# ggsave('plot_decode_LO_area_topbottom_lr.pdf', plot_topbottom_LO_area_lr, width = 15, height = 8)
plot_topbottom_LO_area_lr
# Combine the per-ROI decoding aggregates from both experiments into one long
# table, then split the classifier-pair label into Stimuli/Layout columns.
df_decoding <- rbind(mutate(one_decode_agg_E1_FFA1, Exp = "E1", ROI = "FFA1"),
mutate(one_decode_agg_E1_FFA2, Exp = "E1", ROI = "FFA2"),
mutate(one_decode_agg_E1_VWFA, Exp = "E1", ROI = "VWFA"),
mutate(one_decode_agg_E1_LO, Exp = "E1", ROI = "LO"),
mutate(one_decode_agg_E2_FFA1, Exp = "E2", ROI = "FFA1"),
mutate(one_decode_agg_E2_FFA2, Exp = "E2", ROI = "FFA2"),
mutate(one_decode_agg_E2_VWFA, Exp = "E2", ROI = "VWFA"),
mutate(one_decode_agg_E2_LO, Exp = "E2", ROI = "LO")) %>%
separate(ClassifyPair, c("Stimuli1", "Layout1", "Stimuli2", "Layout2")) %>% # assumes ClassifyPair encodes stimulus1/layout1/stimulus2/layout2 -- TODO confirm delimiter/format
mutate(Stimuli1 = if_else(Stimuli1 %in% c("face", "word"), paste0(Stimuli1, "s"), Stimuli1), # pluralize to match the factor levels used elsewhere
Stimuli2 = if_else(Stimuli2 %in% c("face", "word"), paste0(Stimuli2, "s"), Stimuli2),
Stimuli = ifelse(Stimuli1 == Stimuli2, Stimuli1, # same stimulus decoded across layouts...
paste(Stimuli1, Stimuli2, sep = "\nvs.\n")), # ...otherwise a cross-stimulus pair label
Layout = ifelse(Layout1 == Layout2, Layout1, # same layout for both classes...
paste(toTitleCase(Layout1), toTitleCase(Layout2), sep = "\nvs.\n"))) %>% # ...otherwise a layout-contrast label
select(-c(Stimuli1, Stimuli2, Layout1, Layout2))
# df_decoding$ROI <- fct_relevel(df_decoding$ROI, "LO", after = Inf)
# xaxislabel <- strsplit(unique(df_0$Stimuli), "\nvs.\n")[[1]]
# Keep only the intact-layout comparisons; Stimuli level order is reversed
# to control the facet-row order of the intact plot below.
df_intact <- df_decoding %>%
  filter(Layout == "intact") %>%
  mutate(Stimuli = fct_rev(Stimuli))
# Per-facet captions for the intact-decoding plot: one row per Stimuli facet,
# positioned at the top-left of the left-hemisphere panel (data coordinates).
dat_text_intact_all <- data.frame(
  Stimuli = c("faces\nvs.\nwords", "English\nvs.\nChinese"),
  Hemisphere = rep("left", 2),
  label = c("Chinese speakers: \nChinese faces vs. characters",
            "English speakers: \nEnglish words vs. Chinese characters"),
  x = rep(0.5, 2),
  y = rep(1.05, 2)
)
# Decoding of intact stimuli (E1: faces vs. characters; E2: English vs.
# Chinese), one bar per ROI, faceted Stimuli (rows) x Hemisphere (columns).
plot_intact_all <- ggplot(df_intact, aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis titles
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add significance stars above columns decoded above chance
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_intact_all, aes(x = x, y = y, label = label), hjust = 0, size = 4, fontface = "bold") + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_all_intact.pdf', plot_intact_all, width = 8, height = 8)
plot_intact_all
# Intact-vs-exchange decoding for single-stimulus classifications only
# (rows whose Stimuli label contains "vs." are cross-stimulus pairs).
df_inex <- df_decoding %>%
  filter(!str_detect(Stimuli, "vs."),
         Layout == "Intact\nvs.\nExchange")
# Push the Experiment 2 levels to the end: faces, words, English, Chinese.
df_inex$Stimuli <- df_inex$Stimuli %>%
  fct_relevel("English", after = Inf) %>%
  fct_relevel("Chinese", after = Inf)
# Per-facet captions for the combined intact-vs-exchange plot: one row per
# Stimuli facet (faces, words, English, Chinese), drawn in the left column.
dat_text_inex_all <- data.frame(
  Stimuli = levels(df_inex$Stimuli),
  Hemisphere = rep("left", 4),
  label = paste0(rep(c("Chinese speakers: \n", "English speakers: \n"), each = 2),
                 c("Chinese faces", "Chinese characters",
                   "English words", "Chinese characters")),
  x = rep(0.5, 4),
  y = rep(1.05, 4)
)
# Intact-vs-exchange decoding per ROI, both experiments stacked as facet rows.
plot_inex_all <- ggplot(df_inex, aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis titles; title = "Intact vs. Exchange" was dropped
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add significance stars above columns decoded above chance
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust = 0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_all_inex.pdf', plot_inex_all, width = 8, height = 16)
plot_inex_all
# Captions for the Experiment 1 intact-vs-exchange panel (faces & words rows).
dat_text_inex_all_lr_fw <- data.frame(
  Stimuli = levels(df_inex$Stimuli)[1:2],
  Hemisphere = rep("left", 2),
  label = paste0("Chinese speakers: \n",
                 c("Chinese faces", "Chinese characters")),
  x = rep(0.5, 2),
  y = rep(1.05, 2)
)
# Intact-vs-exchange decoding, Experiment 1 only (left half of the combined figure).
plot_inex_E1 <- ggplot(filter(df_inex, Exp == "E1"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis titles (Experiment 1 panel)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add significance stars above columns decoded above chance
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr_fw, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust = 0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Captions for the Experiment 2 intact-vs-exchange panel (English & Chinese rows).
dat_text_inex_all_lr_ec <- data.frame(
  Stimuli = levels(df_inex$Stimuli)[3:4],
  Hemisphere = rep("left", 2),
  label = paste0("English speakers: \n",
                 c("English words", "Chinese characters")),
  x = rep(0.5, 2),
  y = rep(1.05, 2)
)
# Intact-vs-exchange decoding, Experiment 2 only (right half of the combined figure).
plot_inex_E2 <- ggplot(filter(df_inex, Exp == "E2"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis titles (Experiment 2 panel)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add significance stars above columns decoded above chance
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr_ec, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust = 0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Combine the E1 and E2 intact-vs-exchange panels side by side.
plot_inex_lr <- ggarrange(plot_inex_E1, plot_inex_E2, ncol = 2,
# labels = c("", "Intact vs. Exchange"),
# label.x = -0.36,
# label.y = 1,
font.label = list(size = 24))
# ggsave('plot_decode_all_inex_lr.pdf', plot_inex_lr, width = 15, height = 8)
plot_inex_lr
# Part-based decoding results (top/bottom in E1 and for Chinese in E2;
# left/right for English words): single-stimulus rows, excluding the
# intact-vs-exchange contrast.
df_parts <- df_decoding %>%
  filter(!str_detect(Stimuli, "vs."),
         Layout != "Intact\nvs.\nExchange") %>%
  mutate(Stimuli = as_factor(Stimuli))
# Push the Experiment 2 levels to the end: faces, words, English, Chinese.
df_parts$Stimuli <- df_parts$Stimuli %>%
  fct_relevel("English", after = Inf) %>%
  fct_relevel("Chinese", after = Inf)
# Per-facet captions for the combined parts-decoding plot: one row per
# Stimuli facet (faces, words, English, Chinese), drawn in the left column.
dat_parts_inex_all <- data.frame(
  Stimuli = levels(df_parts$Stimuli),
  Hemisphere = rep("left", 4),
  label = paste0(rep(c("Chinese speakers: \n", "English speakers: \n"), each = 2),
                 c("Chinese faces", "Chinese characters",
                   "English words", "Chinese characters")),
  x = rep(0.5, 4),
  y = rep(1.05, 4)
)
# Decoding of stimulus parts (top vs. bottom; left vs. right for English),
# per ROI, both experiments stacked as facet rows.
plot_topbottom_all <- ggplot(df_parts, aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(title = "Top vs. bottom; left vs. right", x = "ROIs", y = "Accuracy") + # plot title and axis titles
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add significance stars above columns decoded above chance
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_parts_inex_all, mapping = aes(x = x, y = y, label = label), size = 6, fontface = "bold", hjust = 0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_decode_all_topbottom.pdf', plot_topbottom_all, width = 8, height = 16)
plot_topbottom_all
# Captions for the Experiment 1 parts-decoding panel (faces & words rows).
dat_text_tb_all_lr_E1 <- data.frame(
  Stimuli = levels(df_parts$Stimuli)[1:2],
  Hemisphere = rep("left", 2),
  label = paste0("Chinese speakers: \n",
                 c("Chinese faces", "Chinese characters")),
  x = rep(0.5, 2),
  y = rep(1.05, 2)
)
# Parts decoding, Experiment 1 only (left half of the combined figure).
plot_topbottom_E1 <- ggplot(filter(df_parts, Exp == "E1"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis titles (Experiment 1 panel)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add significance stars above columns decoded above chance
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust =0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Captions for the Experiment 2 parts-decoding panel (English & Chinese rows).
# Note: this reuses (overwrites) the name defined for the LO-area figure above.
dat_text_tb_all_lr_E2 <- data.frame(
  Stimuli = levels(df_parts$Stimuli)[3:4],
  Hemisphere = rep("left", 2),
  label = paste0("English speakers: \n",
                 c("English words", "Chinese characters")),
  x = rep(0.5, 2),
  y = rep(1.05, 2)
)
# Parts decoding, Experiment 2 only (right half of the combined figure).
plot_topbottom_E2 <- ggplot(filter(df_parts, Exp == "E2"), aes(y = mean, x = ROI)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
geom_hline(yintercept = 0.5, linetype = 5, alpha = 0.5) + # dashed reference line at chance level (0.5)
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0.4, 1.1)) +
labs(x = "ROIs", y = "Accuracy") + # axis titles (Experiment 2 panel)
geom_text(aes(label = sig_ast(p)), size = 7, nudge_y = 0.15) + # add significance stars above columns decoded above chance
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_tb_all_lr_E2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 15), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Combine the E1 and E2 parts-decoding panels side by side.
plot_topbottom_lr <- ggarrange(plot_topbottom_E1, plot_topbottom_E2, ncol = 2,
# labels = c("", "Top vs. bottom; left vs. right"),
# label.x = -0.6,
# label.y = 1,
font.label = list(size = 24))
# ggsave('plot_decode_all_topbottom_lr.pdf', plot_topbottom_lr, width = 15, height = 8)
plot_topbottom_lr
# Univariate (activation) descriptives for intact/exchange, per experiment and
# ROI, with a combined hemisphere+ROI key (e.g. "LFFA1") used for faceting.
df_uni_ie <- rbind(mutate(desp_uni_E1_FFA1_ie, Exp = "E1", ROI = "FFA1"),
mutate(desp_uni_E1_FFA2_ie, Exp = "E1", ROI = "FFA2"),
mutate(desp_uni_E1_VWFA_ie, Exp = "E1", ROI = "VWFA"),
mutate(desp_uni_E1_LO_ie, Exp = "E1", ROI = "LO"),
mutate(desp_uni_E2_FFA1_ie, Exp = "E2", ROI = "FFA1"),
mutate(desp_uni_E2_FFA2_ie, Exp = "E2", ROI = "FFA2"),
mutate(desp_uni_E2_VWFA_ie, Exp = "E2", ROI = "VWFA"),
mutate(desp_uni_E2_LO_ie, Exp = "E2", ROI = "LO")) %>%
mutate(Stimuli = FaceWord, # align the column name with the decoding tables
HemiROI = paste0(toTitleCase(substr(Hemisphere,1,1)), ROI)) # "left"/"right" -> "L"/"R" prefix
# Simple-effect tests (intact vs. exchange) for each experiment, ROI, and
# hemisphere. No right-hemisphere VWFA entries appear -- presumably the VWFA
# label exists only in the left hemisphere (note the unsuffixed simple_*_VWFA_ie
# names) -- TODO confirm.
df_uni_ie_sig <- rbind(mutate(as_tibble(simple_E1_lFFA1_ie), Exp = "E1", ROI = "FFA1", Hemisphere = "left"),
mutate(as_tibble(simple_E1_lFFA2_ie), Exp = "E1", ROI = "FFA2", Hemisphere = "left"),
mutate(as_tibble(simple_E1_VWFA_ie), Exp = "E1", ROI = "VWFA", Hemisphere = "left"),
mutate(as_tibble(simple_E1_lLO_ie), Exp = "E1", ROI = "LO", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lFFA1_ie), Exp = "E2", ROI = "FFA1", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lFFA2_ie), Exp = "E2", ROI = "FFA2", Hemisphere = "left"),
mutate(as_tibble(simple_E2_VWFA_ie), Exp = "E2", ROI = "VWFA", Hemisphere = "left"),
mutate(as_tibble(simple_E2_lLO_ie), Exp = "E2", ROI = "LO", Hemisphere = "left"),
mutate(as_tibble(simple_E1_rFFA1_ie), Exp = "E1", ROI = "FFA1", Hemisphere = "right"),
mutate(as_tibble(simple_E1_rFFA2_ie), Exp = "E1", ROI = "FFA2", Hemisphere = "right"),
mutate(as_tibble(simple_E1_rLO_ie), Exp = "E1", ROI = "LO", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rFFA1_ie), Exp = "E2", ROI = "FFA1", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rFFA2_ie), Exp = "E2", ROI = "FFA2", Hemisphere = "right"),
mutate(as_tibble(simple_E2_rLO_ie), Exp = "E2", ROI = "LO", Hemisphere = "right")) %>%
filter(FaceWord != ".") %>% # drop placeholder rows
mutate(Stimuli = FaceWord,
HemiROI = paste0(toTitleCase(substr(Hemisphere,1,1)), ROI), # same key construction as df_uni_ie, e.g. "LFFA1"
Layout = "intact") %>% # attach the star to the intact bar when plotting
select(Exp, Layout, Stimuli, HemiROI, p.value)
# Merge the p-values into the descriptives; untested rows get p = 1 so that
# sig_ast() renders no star for them.
df_uni_ie <- left_join(df_uni_ie, df_uni_ie_sig) %>%
mutate(p.value = if_else(is.na(p.value), 1, p.value),
Stimuli = as_factor(Stimuli))
df_uni_ie$Layout <- fct_relevel(df_uni_ie$Layout, "exchange", after = Inf) # intact bar first, exchange second
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "English", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "Chinese", after = Inf)
# Per-facet captions for the univariate intact/exchange plot: one row per
# Stimuli facet, drawn in the first (left FFA1) facet column.
dat_text_inex_all <- data.frame(
Stimuli = levels(df_uni_ie$Stimuli),
# FIX: df_uni_ie$HemiROI is built as paste0(toTitleCase(substr(Hemisphere, 1, 1)), ROI),
# which yields "LFFA1"/"RFFA1"/... (toTitleCase capitalizes the single letter).
# The previous value "lFFA1" matched no existing facet column, so ggplot added a
# spurious extra column just for the annotations.
HemiROI = c("LFFA1"),
label = c("Chinese speakers: \nChinese faces",
"Chinese speakers: \nChinese characters",
"English speakers: \nEnglish words",
"English speakers: \nChinese characters"), # levels(df_uni_ie$Stimuli),
x = .5, # c(1, 1.1, 1.2, 1.2),
y = 2.65
)
# Univariate activation (estimated marginal means) for intact vs. exchange,
# faceted Stimuli (rows) x HemiROI (columns). NOTE: the labeller targets
# "Hemisphere", which is not a facet variable here, so it has no effect.
plot_uni_inex_all <- ggplot(df_uni_ie, aes(y = emmean, x = Layout)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
facet_grid(Stimuli ~ HemiROI, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
# FIX: this plot shows emmean (beta estimates), not decoding accuracy; the
# y title now matches the other univariate plots ("Beta values").
labs(title = "Intact vs. Exchange", x = "ROIs", y = "Beta values") +
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65, nudge_x = 0.5) + # add significance stars between the bar pair
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust =0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "English", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "Chinese", after = Inf)
# Per-facet captions for the dodged (fill = Layout) univariate plot; a Layout
# value is required so the annotation data matches the plot's aesthetics.
dat_text_inex_all_ <- data.frame(
  Stimuli = levels(df_uni_ie$Stimuli),
  Hemisphere = rep("left", 4),
  Layout = rep("intact", 4),
  label = paste0(rep(c("Chinese speakers: \n", "English speakers: \n"), each = 2),
                 c("Chinese faces", "Chinese characters",
                   "English words", "Chinese characters")),
  x = rep(0.5, 4),
  y = rep(2.65, 4)
)
# Univariate activation for intact vs. exchange as dodged bars (fill = Layout),
# one bar pair per ROI, faceted Stimuli (rows) x Hemisphere (columns).
plot_uni_inex_all_ <- ggplot(df_uni_ie, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#C0C0C0", "#A9A9A9")) + # light grey = intact, dark grey = exchange
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
# FIX: this plot shows emmean (beta estimates), not decoding accuracy; the
# y title now matches the later univariate plots ("Beta values").
labs(title = "Intact vs. Exchange", x = "ROIs", y = "Beta values") +
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add significance stars above the bars
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust=0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
# axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all_
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "English", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "Chinese", after = Inf)
# Experiment 1 subset of the univariate data (faces & words conditions only),
# with unused factor levels dropped so facets/captions line up.
df_uni_ie_1 <- df_uni_ie %>%
  filter(Stimuli %in% c("faces", "words")) %>%
  droplevels()
dat_text_inex_all_lr1 <- data.frame(
  Stimuli = levels(df_uni_ie_1$Stimuli),
  Hemisphere = rep("left", 2),
  Layout = rep("intact", 2),
  label = paste0("Chinese speakers: \n",
                 c("Chinese faces", "Chinese characters")),
  x = rep(0.5, 2),
  y = rep(2.65, 2)
)
# Experiment 1 univariate intact-vs-exchange plot (dodged bars, fill = Layout).
plot_uni_inex_all_lr1 <- ggplot(df_uni_ie_1, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) + # light grey = intact, dark grey = exchange
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars from confidence limits
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
labs(x = "ROIs", y = "Beta values") + # axis titles
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add significance stars above the bars
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-facet captions
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # axis-title text sizes
axis.title.y = element_text(size = 17, vjust = 2.5),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Keep only the Experiment 2 stimuli (English words and Chinese characters)
# and drop the now-unused factor levels.
df_uni_ie_2 <- df_uni_ie %>%
  filter(Stimuli %in% c("English", "Chinese")) %>%
  droplevels()
# Annotation text for the E2 intact/exchange plot: one label per Stimuli facet,
# drawn at the top-left (x = 0.5, y = 2.65) of the "left"-hemisphere panels.
annot_ie_lr2 <- c("English speakers: \nEnglish words",
                  "English speakers: \nChinese characters")
dat_text_inex_all_lr2 <- data.frame(
  Stimuli    = levels(df_uni_ie_2$Stimuli),
  Hemisphere = "left",
  Layout     = "intact",
  label      = annot_ie_lr2,
  x          = 0.5,
  y          = 2.65
)
# E2 (English speakers) intact vs. exchange: same layout as the E1 plot above —
# bars of emmean beta values per ROI, faceted Stimuli x Hemisphere, with
# significance stars from the intact-vs-exchange contrast p.value.
plot_uni_inex_all_lr2 <- ggplot(df_uni_ie_2, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) + # lighter grey = first Layout level, darker = second
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars span the confidence limits from the descriptives
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) + # clip y to the shared activation upper limit
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add significance stars above the columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-facet annotation labels
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Arrange the E1 and E2 intact-vs-exchange panels side by side.
plot_uni_ie_lr <- ggarrange(
  plotlist = list(plot_uni_inex_all_lr1, plot_uni_inex_all_lr2),
  ncol = 2,
  font.label = list(size = 24)
)
# ggsave('plot_uni_all_ie_lr.pdf', plot_uni_ie_lr, width = 15, height = 9)
print(plot_uni_ie_lr)
# Combine the univariate top/bottom (partA/partB) descriptives from both
# experiments and all four ROIs into one long data frame.
# tag_desp: label one descriptives table with its experiment and ROI.
tag_desp <- function(tb, exp, roi) mutate(tb, Exp = exp, ROI = roi)
df_uni_tb <- rbind(
  tag_desp(desp_uni_E1_FFA1_tb, "E1", "FFA1"),
  tag_desp(desp_uni_E1_FFA2_tb, "E1", "FFA2"),
  tag_desp(desp_uni_E1_VWFA_tb, "E1", "VWFA"),
  tag_desp(desp_uni_E1_LO_tb,   "E1", "LO"),
  tag_desp(desp_uni_E2_FFA1_tb, "E2", "FFA1"),
  tag_desp(desp_uni_E2_FFA2_tb, "E2", "FFA2"),
  tag_desp(desp_uni_E2_VWFA_tb, "E2", "VWFA"),
  tag_desp(desp_uni_E2_LO_tb,   "E2", "LO")
) %>%
  mutate(
    Stimuli = FaceWord,
    # e.g. Hemisphere "left" + ROI "FFA1" -> "LFFA1"
    HemiROI = paste0(toTitleCase(substr(Hemisphere, 1, 1)), ROI)
  )
# Collect the simple-effect results per experiment/ROI/hemisphere (no right
# VWFA was defined, so the right-hemisphere set has three ROIs only), keep the
# rows with a real FaceWord level, and reduce to the join keys plus p.value.
# tag_simple: label one simple-effects table with experiment, ROI, hemisphere.
tag_simple <- function(tb, exp, roi, hemi) {
  mutate(as_tibble(tb), Exp = exp, ROI = roi, Hemisphere = hemi)
}
df_uni_tb_sig <- rbind(
  tag_simple(simple_E1_lFFA1_tb, "E1", "FFA1", "left"),
  tag_simple(simple_E1_lFFA2_tb, "E1", "FFA2", "left"),
  tag_simple(simple_E1_VWFA_tb,  "E1", "VWFA", "left"),
  tag_simple(simple_E1_lLO_tb,   "E1", "LO",   "left"),
  tag_simple(simple_E2_lFFA1_tb, "E2", "FFA1", "left"),
  tag_simple(simple_E2_lFFA2_tb, "E2", "FFA2", "left"),
  tag_simple(simple_E2_VWFA_tb,  "E2", "VWFA", "left"),
  tag_simple(simple_E2_lLO_tb,   "E2", "LO",   "left"),
  tag_simple(simple_E1_rFFA1_tb, "E1", "FFA1", "right"),
  tag_simple(simple_E1_rFFA2_tb, "E1", "FFA2", "right"),
  tag_simple(simple_E1_rLO_tb,   "E1", "LO",   "right"),
  tag_simple(simple_E2_rFFA1_tb, "E2", "FFA1", "right"),
  tag_simple(simple_E2_rFFA2_tb, "E2", "FFA2", "right"),
  tag_simple(simple_E2_rLO_tb,   "E2", "LO",   "right")
) %>%
  filter(FaceWord != ".") %>%
  mutate(
    Stimuli = FaceWord,
    HemiROI = paste0(toTitleCase(substr(Hemisphere, 1, 1)), ROI),
    # E1 contrasts are attached to the "top" layout, E2 contrasts to "partA"
    Layout = case_when(
      Exp == "E1" ~ "top",
      Exp == "E2" ~ "partA"
    )
  ) %>%
  select(Exp, Layout, Stimuli, HemiROI, p.value)
# Attach the simple-effect p-values to the descriptives; rows without a
# matching test keep p = 1 so sig_ast() draws no star for them.
df_uni_tb <- df_uni_tb %>%
  left_join(df_uni_tb_sig) %>%
  mutate(
    p.value = coalesce(p.value, 1),
    Stimuli = as_factor(Stimuli)
  )
# df_uni_tb$Layout <- fct_relevel(df_uni_tb$Layout, "exchange", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "English", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "Chinese", after = Inf)
# Annotation text for the all-ROI top/bottom plot: one label per Stimuli facet,
# placed in the first facet column at the top-left (x = .5, y = 2.65).
dat_text_inex_all <- data.frame(
  Stimuli = levels(df_uni_tb$Stimuli),
  # NOTE(review): df_uni_tb$HemiROI is built as
  # paste0(toTitleCase(substr(Hemisphere, 1, 1)), ROI), i.e. "LFFA1" with an
  # uppercase "L". The previous value "lFFA1" matched no data facet and would
  # create a spurious empty facet column for the annotations.
  HemiROI = c("LFFA1"),
  label = c("Chinese speakers: \nChinese faces",
            "Chinese speakers: \nChinese characters",
            "English speakers: \nEnglish words",
            "English speakers: \nChinese characters"), # levels(df_uni_tb$Stimuli),
  x = .5, # c(1, 1.1, 1.2, 1.2),
  y = 2.65
)
# Pool the two part conditions across experiments: E1's vertical split
# (top/bottom) and E2's split (partA/partB) are mapped onto partA/partB.
# Any other Layout value becomes NA (same as the original case_when).
df_uni_tb_all <- df_uni_tb %>%
  mutate(
    Layout = if_else(
      Layout %in% c("top", "partA"), "partA",
      if_else(Layout %in% c("bottom", "partB"), "partB", NA_character_)
    )
  )
# All-ROI part plot (univariate betas): bars of emmean per Layout (partA/partB),
# faceted Stimuli (rows) x HemiROI (cols), with significance stars from the
# simple-effect p.value. FIX: y label was "Accuracy" (copy-paste from the MVPA
# plots); this plots emmean beta values with the activationUL limit, so it is
# labelled "Beta values" like the sibling univariate plots.
plot_uni_inex_all <- ggplot(df_uni_tb_all, aes(y = emmean, x = Layout)) +
geom_col(position = "dodge", width = .5, fill = "#CDCDC8") +
# NOTE(review): the labeller below keys on "Hemisphere" but the column facet
# variable is HemiROI, so it has no effect here.
facet_grid(Stimuli ~ HemiROI, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars span the confidence limits from the descriptives
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.9)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes; title = "Top vs. Bottom; Left vs. Right",
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65, nudge_x = 0.5) + # add significance stars above the columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust=0) + # per-facet annotation labels
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
# plot.margin = margin(5, 170, 60, 40, unit = "pt"),
text = element_text(colour="black"),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "English", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "Chinese", after = Inf)
# Annotation text for the Hemisphere-faceted part plot: one label per Stimuli
# facet, drawn at the top-left of the "left"-hemisphere panels.
annot_tb_all <- c("Chinese speakers: \nChinese faces",
                  "Chinese speakers: \nChinese characters",
                  "English speakers: \nEnglish words",
                  "English speakers: \nChinese characters")
dat_text_inex_all_ <- data.frame(
  Stimuli    = levels(df_uni_tb$Stimuli),
  Hemisphere = "left",
  Layout     = "partA",
  label      = annot_tb_all,
  x          = 0.5,
  y          = 2.65
)
# Hemisphere-faceted part plot (univariate betas): bars of emmean per ROI,
# filled by pooled Layout (partA/partB), with significance stars from the
# simple-effect p.value. FIX: y label was "Accuracy" (copy-paste from the MVPA
# plots); this plots emmean beta values with the activationUL limit, so it is
# labelled "Beta values" like the sibling univariate plots.
plot_uni_inex_all_ <- ggplot(df_uni_tb_all, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#C0C0C0", "#A9A9A9")) + # lighter grey = partA, darker = partB
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars span the confidence limits from the descriptives
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) +
labs(title = "Top vs. Bottom; Left vs. Right", x = "ROIs", y = "Beta values") + # set the names for the main, x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add significance stars above the columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_, mapping = aes(x = x, y = y, label = label), size = 7, fontface = "bold", hjust=0) + # per-facet annotation labels
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
# axis.text.y = element_text(size = 13),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 13), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# ggsave('plot_uni_all_inex.pdf', plot_uni_inex_all, width = 8, height = 16)
plot_uni_inex_all_
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "English", after = Inf)
# df_inex_uni$Stimuli <- fct_relevel(df_inex_uni$Stimuli, "Chinese", after = Inf)
# Keep only the Experiment 1 stimuli (Chinese faces and characters) from the
# pooled part data and drop the now-unused factor levels.
df_uni_tb_1 <- df_uni_tb_all %>%
  filter(Stimuli %in% c("faces", "words")) %>%
  droplevels()
# Annotation text for the E1 part plot (this overwrites the intact/exchange
# version defined earlier): one label per Stimuli facet, top-left of the
# "left"-hemisphere panels.
annot_tb_lr1 <- c("Chinese speakers: \nChinese faces",
                  "Chinese speakers: \nChinese characters")
dat_text_inex_all_lr1 <- data.frame(
  Stimuli    = levels(df_uni_tb_1$Stimuli),
  Hemisphere = "left",
  Layout     = "partA",
  label      = annot_tb_lr1,
  x          = 0.5,
  y          = 2.65
)
# E1 (Chinese speakers) top vs. bottom parts: bars of emmean beta values per
# ROI, faceted Stimuli x Hemisphere, with significance stars from the
# simple-effect p.value (overwrites the intact/exchange plot of the same name).
plot_uni_inex_all_lr1 <- ggplot(df_uni_tb_1, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) + # lighter grey = partA, darker = partB
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars span the confidence limits from the descriptives
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) + # clip y to the shared activation upper limit
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add significance stars above the columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr1, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-facet annotation labels
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Keep only the Experiment 2 stimuli (English words and Chinese characters)
# from the pooled part data and drop the now-unused factor levels.
df_uni_tb_2 <- df_uni_tb_all %>%
  filter(Stimuli %in% c("English", "Chinese")) %>%
  droplevels()
# Annotation text for the E2 part plot (this overwrites the intact/exchange
# version defined earlier): one label per Stimuli facet, top-left of the
# "left"-hemisphere panels.
annot_tb_lr2 <- c("English speakers: \nEnglish words",
                  "English speakers: \nChinese characters")
dat_text_inex_all_lr2 <- data.frame(
  Stimuli    = levels(df_uni_tb_2$Stimuli),
  Hemisphere = "left",
  Layout     = "partA",
  label      = annot_tb_lr2,
  x          = 0.5,
  y          = 2.65
)
# E2 (English speakers) left vs. right / top vs. bottom parts: bars of emmean
# beta values per ROI, faceted Stimuli x Hemisphere, with significance stars
# from the simple-effect p.value (overwrites the intact/exchange plot of the
# same name).
plot_uni_inex_all_lr2 <- ggplot(df_uni_tb_2, aes(y = emmean, x = ROI, fill = Layout)) +
geom_col(position = "dodge", width = .5) + # , fill = "#CDCDC8"
scale_fill_manual(values = c("#CDCDC8", "#A9A9A9")) + # lighter grey = partA, darker = partB
facet_grid(Stimuli ~ Hemisphere, scales = "free_x", space = "free_x",
switch = "x",
labeller = labeller(Hemisphere = c(left = "left hemisphere", right = "right hemisphere"))) +
geom_errorbar(mapping = aes(ymin = lower.CL, ymax = upper.CL), linetype = 1, # error bars span the confidence limits from the descriptives
show.legend = FALSE, width = 0.25, alpha = .5,
position = position_dodge(width=0.5)) +
# scale_x_discrete(labels = toTitleCase(df_0$Stimuli)) +
# scale_y_continuous(expand= c(0, 0), breaks = seq(0, 1, .25)) + # remove the space between columns and x axis
coord_cartesian(ylim = c(0, activationUL)) + # clip y to the shared activation upper limit
labs(x = "ROIs", y = "Beta values") + # set the names for the x and y axes
geom_text(aes(label = sig_ast(p.value)), size = 7, nudge_y = 0.65) + # add significance stars above the columns
# geom_text(aes(label = round(mean, 2)), size = 4, nudge_y = 0.2) +
geom_text(data = dat_text_inex_all_lr2, mapping = aes(x = x, y = y, label = label), size = 4.5, fontface = "bold", hjust=0) + # per-facet annotation labels
theme_bw() +
theme(
plot.title = element_text(lineheight=.8, face="bold", size = 24, hjust = 0.5, vjust = -1),
text = element_text(colour="black", size = 18),
axis.text = element_text(colour="black"),
axis.text.x = element_text(face = "bold", size = 16),
axis.text.y = element_text(size = 18),
axis.title.x = element_text(face = "bold", size = 20), # the size of the texts in plot
axis.title.y = element_text(size = 17, vjust = 2.5), # the size of the texts in plot
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'), # , arrow = arrow(length = unit(0.3, "cm"))
# axis.text.x = element_text(angle = 45, vjust = 0.5),
legend.position = "bottom",
panel.border = element_blank(),
panel.grid.minor = element_blank(),
panel.grid.major = element_blank(),
panel.spacing = unit(1.5, "lines"),
# remove the facet background color
strip.text.x = element_text(size = 18), # element_blank(),
strip.text.y = element_blank(),
strip.background = element_blank(),
strip.placement = "outside",
) +
NULL
# Arrange the E1 and E2 part plots side by side.
plot_uni_tb_lr <- ggarrange(
  plotlist = list(plot_uni_inex_all_lr1, plot_uni_inex_all_lr2),
  ncol = 2,
  font.label = list(size = 24)
)
# ggsave('plot_uni_all_tb_lr.pdf', plot_uni_tb_lr, width = 15, height = 9)
print(plot_uni_tb_lr)
# rstudioapi::versionInfo()
# Record the R version and attached packages used to produce this document
# (the captured output is shown in the comments below).
print(sessionInfo())
## R version 3.6.3 (2020-02-29)
## Platform: x86_64-apple-darwin15.6.0 (64-bit)
## Running under: macOS Mojave 10.14.5
##
## Matrix products: default
## BLAS: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRblas.0.dylib
## LAPACK: /Library/Frameworks/R.framework/Versions/3.6/Resources/lib/libRlapack.dylib
##
## locale:
## [1] en_US.UTF-8/en_US.UTF-8/en_US.UTF-8/C/en_US.UTF-8/en_US.UTF-8
##
## attached base packages:
## [1] tools stats graphics grDevices utils datasets methods base
##
## other attached packages:
## [1] ggpubr_0.2.5 magrittr_2.0.1 emmeans_1.4.7 lmerTest_3.1-0 afex_0.25-1 lme4_1.1-21 Matrix_1.2-18 forcats_0.4.0 stringr_1.4.0 dplyr_0.8.5 purrr_0.3.3 readr_1.3.1 tidyr_1.0.2 tibble_3.0.1 ggplot2_3.3.0 tidyverse_1.2.1
##
## loaded via a namespace (and not attached):
## [1] httr_1.4.1 jsonlite_1.7.1 splines_3.6.3 carData_3.0-3 modelr_0.1.5 assertthat_0.2.1 cellranger_1.1.0 yaml_2.2.1 numDeriv_2016.8-1.1 pillar_1.4.4 backports_1.1.5 lattice_0.20-38 glue_1.4.2 digest_0.6.27 ggsignif_0.6.0 rvest_0.3.5 minqa_1.2.4 colorspace_1.4-1 cowplot_1.0.0 htmltools_0.5.0 plyr_1.8.6 pkgconfig_2.0.3
## [23] broom_0.5.3.9000 haven_2.2.0 xtable_1.8-4 mvtnorm_1.0-11 scales_1.0.0 openxlsx_4.1.3 rio_0.5.16 generics_0.0.2 car_3.0-5 ellipsis_0.3.1 withr_2.1.2 cli_2.0.2 crayon_1.3.4 readxl_1.3.1 estimability_1.3 evaluate_0.14 fansi_0.4.1 nlme_3.1-144 MASS_7.3-51.5 xml2_1.2.2 foreign_0.8-75 data.table_1.12.6
## [45] hms_0.5.3 lifecycle_0.2.0 munsell_0.5.0 zip_2.0.4 compiler_3.6.3 rlang_0.4.8 grid_3.6.3 nloptr_1.2.1 rstudioapi_0.11 labeling_0.3 rmarkdown_2.1 boot_1.3-24 gtable_0.3.0 abind_1.4-5 curl_4.3 reshape2_1.4.3 R6_2.4.1 lubridate_1.7.4 knitr_1.30 stringi_1.5.3 parallel_3.6.3 Rcpp_1.0.4.6
## [67] vctrs_0.3.1 tidyselect_1.0.0 xfun_0.19 coda_0.19-3
# A work by Haiyang Jin